| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-281) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-57) | license_type (stringclasses, 2 values) | repo_name (stringlengths 6-116) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 313 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (stringclasses, 17 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 107 values) | src_encoding (stringclasses, 20 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.02M) | extension (stringclasses, 78 values) | content (stringlengths 2-6.02M) | authors (listlengths 1-1) | author (stringlengths 0-175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
86ef3bfdb2fb1c55d0cd8cefb61f7f2d7e42c78c
|
009a73adacda072e6241965ff0c589e1fff92aa4
|
/CreateConnections.py
|
92293021753f3c40cb89638383829da70da84cfb
|
[] |
no_license
|
bernhardkaplan/OculomotorControl
|
0dc24095c813a25856ed9556b8a250e10952e88e
|
ac0b5261a3c3617e3f72f9ffd8e5515270f80fb9
|
refs/heads/master
| 2021-01-01T17:22:19.117464
| 2013-09-03T10:53:32
| 2013-09-03T10:53:32
| 10,267,811
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
class CreateConnections(object):
def __init__(self, params):
self.params = params
def connect_mt_to_bg(self, src_net, tgt_net):
"""
The NEST simulation should run for some pre-fixed time
Keyword arguments:
src_net, tgt_net -- the source and the target network
"""
pass
|
[
"Bernhard.Kaplan@gmail.com"
] |
Bernhard.Kaplan@gmail.com
|
b7c78da890d1c759f77537a7e6faae7e4377540e
|
8e53fa0b67e2268b912ad09a41356b622fff715d
|
/uniquee.py
|
e60f09ef22cdf16401ac8e1c5abde45851359d09
|
[] |
no_license
|
Dhathri29/Guvi-Sessions
|
a0962212e8f6e95429de101f2b03bd3ab500baee
|
3a0a6c78b82420b518eca167e4a7c79c75e1d6f0
|
refs/heads/master
| 2020-04-30T15:34:21.454160
| 2019-07-23T12:01:47
| 2019-07-23T12:01:47
| 176,923,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
def Repeat(x):
    # collect values that occur more than once in x, then return them sorted
    _size = len(x)
    repeated = []
    for i in range(_size):
        k = i + 1
        for j in range(k, _size):
            if x[i] == x[j] and x[i] not in repeated:
                repeated.append(x[i])
    repeated.sort()
    return repeated
n = int(input())
list1 = list(map(int, input().split()))
print(Repeat(list1))
|
[
"noreply@github.com"
] |
noreply@github.com
|
f6813e579cbf76ee872102859d44f28c4c47746b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03107/s767358209.py
|
f9a07c556a610f1f56bccfb4d8bc42ed0285d230
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
s = input()
red = s.count("0")
blue = s.count("1")
num = min(red,blue)
print(num*2)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ec1d8c4d661870efcce6dd2ea0b18baee2087b45
|
f21109a5c23340447d0e3d34f14299c30e49d023
|
/Dynamic Programming/11. Longest Common Subsequence.py
|
a8f0e898a3fad5f7001ac206032d7ee02a013de3
|
[] |
no_license
|
ShashankSinha98/FAANG-Questions
|
45366004c3176a3c11ef554a25a11fe21e53ebca
|
73ef742b3747e89d32d384baa6acf35044bf3ce0
|
refs/heads/master
| 2022-12-21T09:42:51.796086
| 2020-09-24T08:24:47
| 2020-09-24T08:24:47
| 286,765,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
t = int(input())
def common_lcs(str1,n,str2,m):
dp = [[0]*(m+1) for i in range(n+1)]
for i in range(1,n+1):
for j in range(1,m+1):
if str1[i-1]==str2[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i-1][j],dp[i][j-1])
return dp[n][m]
def display(arr):
for i in arr:
for j in i:
print(j,end=" ")
print()
print()
while t!=0:
t-=1
n,m = [int(i) for i in input().split()]
str1 = input()
str2 = input()
res = common_lcs(str1,n,str2,m)
print(res)
|
[
"34626597+ShashankSinha98@users.noreply.github.com"
] |
34626597+ShashankSinha98@users.noreply.github.com
|
446d6d7faa595deb53a808126c8a2aced62533ca
|
00b86f883694b17575a514227960b963d3b6179b
|
/Analysis/python/regions.py
|
fd5293018c7e89c2e26d88fe5e64bddca3efeb61
|
[] |
no_license
|
HephyAnalysisSW/TTZRun2EFT
|
1b33a6bad49d0d6e119e49c74faa35dee0e4bb0e
|
730a7465d4cbde52649965ed0e2a5b29bcc309c3
|
refs/heads/master
| 2020-04-30T16:40:46.454225
| 2019-04-18T08:09:46
| 2019-04-18T08:09:46
| 176,956,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,950
|
py
|
from TTZRun2EFT.Analysis.Region import Region
from TTZRun2EFT.Analysis.Region import texString
from TTZRun2EFT.Analysis.Region import allowedVars
from math import pi
def getRegionsFromThresholds(var, vals, gtLastThreshold = True):
return [Region(var, (vals[i], vals[i+1])) for i in range(len(vals)-1)]
def getRegions2D(varOne, varOneThresholds, varTwo, varTwoThresholds):
regions_varOne = getRegionsFromThresholds(varOne, varOneThresholds)
regions_varTwo = getRegionsFromThresholds(varTwo, varTwoThresholds)
regions2D = []
for r1 in regions_varOne:
for r2 in regions_varTwo:
regions2D.append(r1+r2)
return regions2D
def simpleStringToDict( simpleString ):
# replace variables by a string not containing "_"
for i, var in enumerate(allowedVars):
simpleString = simpleString.replace(var, "var%i"%i)
cutList = simpleString.split("_")
# convert simpleString to threshold tuple, fill in dict
cutDict = {}
for cut in cutList:
for i, var in enumerate(allowedVars):
if "var"+str(i) in cut:
cutRange = cut.replace("var%i"%i, "")
cutRange = cutRange.split("To")
cutRange = tuple( map( float, cutRange ) )
if len(cutRange) == 1: cutRange = ( cutRange[0], -1 )
cutDict.update( {var:cutRange} )
return cutDict
def dictToCutString( dict ):
res=[]
for var in dict.keys():
svar = var
s1=svar+">="+str(dict[var][0])
if dict[var][1]>-1: s1+="&&"+svar+"<"+str(dict[var][1])
res.append(s1)
return "&&".join(res)
def simpleStringToCutString( cutString ):
return dictToCutString( simpleStringToDict( cutString ) )
#Put all sets of regions that are used in the analysis, closure, tables, etc.
#differencial
thresholds = [ 20, 120, 220, 320, 420, -999 ]
genTTZRegions = getRegionsFromThresholds( "GenPhoton_pt[0]", thresholds )
|
[
"lukas.k.lechner@gmail.com"
] |
lukas.k.lechner@gmail.com
|
ebb96a9ed8fe8b1ad75429c27bcb2733a7ca3183
|
f4dcbcdfbafae47b8db5ef62701cc001bf044827
|
/utils.py
|
f71c46607d7a835be15ca1bcfe05e32c91240f45
|
[] |
no_license
|
sheriffab/Machine-learning
|
116dfed45aed4a889167b46566a12097e742ccb1
|
873177d2586a8843a9bd0ea0bec3bfaf4bb7806b
|
refs/heads/main
| 2023-06-11T08:33:41.101922
| 2021-06-25T04:18:41
| 2021-06-25T04:18:41
| 380,121,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,250
|
py
|
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
def oneHotEncodeData(data_df):
# Make sure names are similar
data_df['t1_playerid'] = data_df['t1_playerid'].str.lower().str.strip().str.replace(" ","_")
data_df['t2_playerid'] = data_df['t2_playerid'].str.lower().str.strip().replace(" ","_")
data_df['t1p1_player'] = data_df['t1p1_player'].str.lower().str.strip().replace(" ","_")
data_df['t1p2_player'] = data_df['t1p2_player'].str.lower().str.strip().replace(" ","_")
data_df['t1p3_player'] = data_df['t1p3_player'].str.lower().str.strip().replace(" ","_")
data_df['t1p4_player'] = data_df['t1p4_player'].str.lower().str.strip().replace(" ","_")
data_df['t1p5_player'] = data_df['t1p5_player'].str.lower().str.strip().replace(" ","_")
data_df['t2p1_player'] = data_df['t2p1_player'].str.lower().str.strip().replace(" ","_")
data_df['t2p2_player'] = data_df['t2p2_player'].str.lower().str.strip().replace(" ","_")
data_df['t2p3_player'] = data_df['t2p3_player'].str.lower().str.strip().replace(" ","_")
data_df['t2p4_player'] = data_df['t2p4_player'].str.lower().str.strip().replace(" ","_")
data_df['t2p5_player'] = data_df['t2p5_player'].str.lower().str.strip().replace(" ","_")
data_df['t1p1_champion'] = data_df['t1p1_champion'].str.lower().str.strip().replace(" ","_")
data_df['t1p2_champion'] = data_df['t1p2_champion'].str.lower().str.strip().replace(" ","_")
data_df['t1p3_champion'] = data_df['t1p3_champion'].str.lower().str.strip().replace(" ","_")
data_df['t1p4_champion'] = data_df['t1p4_champion'].str.lower().str.strip().replace(" ","_")
data_df['t1p5_champion'] = data_df['t1p5_champion'].str.lower().str.strip().replace(" ","_")
data_df['t2p1_champion'] = data_df['t2p1_champion'].str.lower().str.strip().replace(" ","_")
data_df['t2p2_champion'] = data_df['t2p2_champion'].str.lower().str.strip().replace(" ","_")
data_df['t2p3_champion'] = data_df['t2p3_champion'].str.lower().str.strip().replace(" ","_")
data_df['t2p4_champion'] = data_df['t2p4_champion'].str.lower().str.strip().replace(" ","_")
data_df['t2p5_champion'] = data_df['t2p5_champion'].str.lower().str.strip().replace(" ","_")
data_df['t1_ban1'] = data_df['t1_ban1'].str.lower().str.strip().replace(" ","_")
data_df['t1_ban2'] = data_df['t1_ban2'].str.lower().str.strip().replace(" ","_")
data_df['t1_ban3'] = data_df['t1_ban3'].str.lower().str.strip().replace(" ","_")
data_df['t1_ban4'] = data_df['t1_ban4'].str.lower().str.strip().replace(" ","_")
data_df['t1_ban5'] = data_df['t1_ban5'].str.lower().str.strip().replace(" ","_")
data_df['t2_ban1'] = data_df['t2_ban1'].str.lower().str.strip().replace(" ","_")
data_df['t2_ban2'] = data_df['t2_ban2'].str.lower().str.strip().replace(" ","_")
data_df['t2_ban3'] = data_df['t2_ban3'].str.lower().str.strip().replace(" ","_")
data_df['t2_ban4'] = data_df['t2_ban4'].str.lower().str.strip().replace(" ","_")
data_df['t2_ban5'] = data_df['t2_ban5'].str.lower().str.strip().replace(" ","_")
categorical_columns = ['t1_playerid','t2_playerid','t1p1_player','t1p2_player','t1p3_player','t1p4_player',
't1p5_player','t2p1_player','t2p2_player','t2p3_player','t2p4_player','t2p5_player',
't1p1_champion','t1p2_champion','t1p3_champion','t1p4_champion',
't1p5_champion','t2p1_champion','t2p2_champion','t2p3_champion','t2p4_champion','t2p5_champion',
't1_ban1','t1_ban2','t1_ban3','t1_ban4','t1_ban5','t2_ban1','t2_ban2','t2_ban3','t2_ban4','t2_ban5',]
dum_df = pd.get_dummies(data_df, columns=categorical_columns, prefix=categorical_columns)
return dum_df
def piecharts(data_df):
bans = pd.Series(data_df['t1_ban1'])
bans.append(data_df['t1_ban2'])
bans.append(data_df['t1_ban3'])
unique_bans = bans.unique()
ban_count = []
for i in unique_bans:
count = 0
for a in data_df['t1_ban1']:
if(a == i):
count += 1
for b in data_df['t1_ban2']:
if(b == i):
count += 1
for c in data_df['t1_ban3']:
if(c == i):
count += 1
ban_count.append(count)
ban_count_series = pd.Series(ban_count)
ban_count_series.index = unique_bans
plt.figure(figsize=(12,7))
ban_count_series.sort_values(ascending=False)[:10].plot(kind='pie', autopct='%1.1f%%')
plt.title('Top 10 Banned Champions')
plt.ylabel('Champions')
plt.show()
picks = pd.Series(data_df['t1p1_champion'])
picks.append(data_df['t1p2_champion'])
picks.append(data_df['t1p3_champion'])
picks.append(data_df['t1p4_champion'])
picks.append(data_df['t1p5_champion'])
unique_picks = picks.unique()
pick_count = []
for i in unique_picks:
count = 0
for a in data_df['t1_ban1']:
if(a == i):
count += 1
for b in data_df['t1_ban2']:
if(b == i):
count += 1
for c in data_df['t1_ban3']:
if(c == i):
count += 1
pick_count.append(count)
pick_count_series = pd.Series(pick_count)
pick_count_series.index = unique_picks
plt.figure(figsize=(12,7))
pick_count_series.sort_values(ascending=False)[:10].plot(kind='pie', autopct='%1.1f%%')
plt.title('Top 10 Picked Champions')
plt.ylabel('Champions')
plt.show()
def bargraphs(data_df):
total_dragons = data_df.groupby(["t1_playerid"]).t1_dragons.sum() + data_df.groupby(["t2_playerid"]).t2_dragons.sum()
total_dragons.sort_values(ascending=False)[:10].plot(kind='barh')
plt.title('Teams Top 10 Dragon Count')
plt.ylabel('Teams')
plt.show()
total_heralds = data_df.groupby(["t1_playerid"]).t1_heralds.sum() + data_df.groupby(["t2_playerid"]).t2_heralds.sum()
total_heralds.sort_values(ascending=False)[:10].plot(kind='barh')
plt.title('Teams Top 10 Heralds Count')
plt.ylabel('Teams')
plt.show()
total_barons = data_df.groupby(["t1_playerid"]).t1_barons.sum() + data_df.groupby(["t2_playerid"]).t2_barons.sum()
total_barons.sort_values(ascending=False)[:10].plot(kind='barh')
plt.title('Teams Top 10 Barons Count')
plt.ylabel('Teams')
plt.show()
def bargraphs2(data_df):
wins = data_df[data_df['t2_result'] == 1]['t2_playerid'].value_counts() + data_df[data_df['t1_result'] == 1]['t1_playerid'].value_counts()
wins.sort_values(ascending=False)[:10].plot(kind='barh')
plt.title("Number of games won")
plt.show()
def bargraphs3(data_df):
wins = data_df[data_df['t2_result'] == 1]['t2_playerid'].value_counts() + data_df[data_df['t1_result'] == 1]['t1_playerid'].value_counts()
losses = data_df[data_df['t2_result'] == 0]['t2_playerid'].value_counts() + data_df[data_df['t1_result'] == 0]['t1_playerid'].value_counts()
ratio = wins / (losses + wins)
plt.title("Win/loss ratio")
ratio.sort_values(ascending=False)[:15].plot(kind='barh')
def rolling_average(data_df, t1_count_name, t1_objective, t1_avg_objective, t2_count_name, t2_objective, t2_avg_objective):
cummsum(t1_count_name, 't1_playerid', t1_objective, data_df)
cummsum(t2_count_name, 't2_playerid', t2_objective, data_df)
data_df['t1_gamecount'] = data_df.groupby('t1_playerid').cumcount()
data_df[t1_avg_objective] = data_df[t1_count_name]/data_df['t1_gamecount']
data_df[t1_avg_objective] = data_df[t1_avg_objective].fillna(0)
data_df['t2_gamecount'] = data_df.groupby('t2_playerid').cumcount()
data_df[t2_avg_objective] = data_df[t2_count_name]/data_df['t2_gamecount']
data_df[t2_avg_objective] = data_df[t2_avg_objective].fillna(0)
data_df[t1_avg_objective]= data_df[t1_avg_objective].round(2)
data_df[t2_avg_objective]= data_df[t2_avg_objective].round(2)
return data_df
def cummsum(sum_feature, player, player_stats, data):
data[sum_feature] = data.groupby(player)[player_stats].cumsum(axis=0)
    data[sum_feature] = data.groupby(player)[sum_feature].shift(1)  # lag by 1 so there's only info from previous matches
data[sum_feature].fillna(0,inplace=True)
return data
def rep(new_col, og_col, data):
data[new_col] = data[og_col].replace([0],1)
return data
def kda (player_kda, player_kills, player_assists, player_deaths, data):
data[player_kda] = (data[player_kills] + data[player_assists])/data[player_deaths]
data[player_kda] = data[player_kda].round(2)
return data
def buildLrModel(X_train, Y_train, feature_names):
logistic = LogisticRegression()
log_model = GridSearchCV(logistic, {
'C': [1,10,100],
'max_iter': [25,50,100],
'solver' : ['liblinear','saga'],
'tol' : [0.1,0.2,0.3]
})
log_model.fit(X_train, Y_train)
print(log_model.best_estimator_)
return log_model
def buildNeuralModel(X_train,Y_train,feature_names):
feature_count = len(feature_names)
neural_model = keras.Sequential([
layers.Dense(32, activation='relu', input_shape=[feature_count]),
layers.Dense(32, activation='relu'),
layers.Dense(1, activation='sigmoid')
])
neural_model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'],
)
EPOCHS = 50
neural_model.fit(
X_train,
Y_train,
batch_size=32,
epochs=EPOCHS,
)
return neural_model
def buildRandomForestModel(X_train,Y_train,feature_names):
random_forest= RandomForestClassifier()
random_forest_model = GridSearchCV(random_forest, {
'n_estimators': [10,100,200],
'max_depth': [1,2,5,10],
})
random_forest_model.fit(X_train, Y_train)
return random_forest_model
def addWinRate(data_df,dum_df):
winMap = {}
for item in dum_df.columns:
if 't1_playerid' in item:
winMap[item] = {'wins':[],'totalGames':[]}
if 't2_playerid' in item:
winMap[item] = {'wins':[],'totalGames':[]}
data_df['t1_games_won_so_far'] = 0
data_df['t1__games_played_so_far'] = 0
data_df['t2_games_won_so_far'] = 0
data_df['t2__games_played_so_far'] = 0
for team, values in winMap.items():
team_df = data_df[data_df[team] == 1]
idx = 0
for index, row in team_df.iterrows():
result = 0
if 't1_playerid' in team:
result = row['t1_result']
else:
result = row['t2_result']
laggedIdx = idx
if idx == 0:
values['wins'].append(result)
values['totalGames'].append(1)
if 't1_playerid' in team:
data_df.loc[index,'t1_games_won_so_far'] = 0
data_df.loc[index,'t1_games_played_so_far'] = 0
else:
data_df.loc[index,'t2_games_won_so_far'] = 0
data_df.loc[index,'t2_games_played_so_far'] = 0
else:
values['wins'].append(values['wins'][idx - 1] + result)
values['totalGames'].append(values['totalGames'][idx - 1] + 1)
if 't1_playerid' in team:
data_df.loc[index,'t1_games_won_so_far'] = values['wins'][idx - 1]
data_df.loc[index,'t1_games_played_so_far'] = values['totalGames'][idx - 1]
else:
data_df.loc[index,'t2_games_won_so_far'] = values['wins'][idx - 1]
data_df.loc[index,'t2_games_played_so_far'] = values['totalGames'][idx - 1]
idx = idx + 1
data_df['t1_winrate'] = data_df['t1_games_won_so_far'] / data_df['t1_games_played_so_far']
data_df['t2_winrate'] = data_df['t2_games_won_so_far'] / data_df['t2_games_played_so_far']
data_df['t1_winrate'] = data_df['t1_winrate'].fillna(0)
data_df['t2_winrate'] = data_df['t2_winrate'].fillna(0)
return data_df
|
[
"noreply@github.com"
] |
noreply@github.com
|
5c61c5283583e4f8005ab3374fa0c5dfff7297da
|
7c6096fda1c62882aecde1b585418eee7a5e76da
|
/forums/migrations/0006_questions_tags.py
|
a9ee72ccd5a8068984a1f879b3ff58b0e65b5582
|
[] |
no_license
|
Lokesh-Balla/StackCopy
|
dec2596ce6c68cea6deb498a60e331b280ea4be7
|
05604b8719b301144f295dccad893ab6e170bee9
|
refs/heads/master
| 2023-02-08T04:29:20.070834
| 2020-07-14T17:07:30
| 2020-07-14T17:07:30
| 193,842,871
| 0
| 0
| null | 2023-02-04T05:18:40
| 2019-06-26T06:27:46
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
# Generated by Django 2.2.2 on 2019-06-30 05:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forums', '0005_answers_user'),
]
operations = [
migrations.AddField(
model_name='questions',
name='tags',
field=models.ManyToManyField(to='forums.Tags'),
),
]
|
[
"Lokesh-Balla@users.noreply.github.com"
] |
Lokesh-Balla@users.noreply.github.com
|
b19d04a16672a6e82ef0ac5031a632a46feb1e78
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/api/nn/test_dynamicdecode.py
|
3dfc0093a772141b2e3a8044746f517ce9ae1b98
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 20,209
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test paddle.nn.dynamic_decode
"""
import random
import paddle
from apibase import compare
import pytest
import numpy as np
from paddle.nn import BeamSearchDecoder, dynamic_decode
from paddle.nn import GRUCell, Linear, Embedding, LSTMCell
from paddle.nn import TransformerDecoderLayer, TransformerDecoder
np.random.seed(2)
random.seed(2)
paddle.seed(2)
class ModelGRUCell4(paddle.nn.Layer):
"""
GRUCell model
"""
def __init__(self):
"""
initialize
"""
super(ModelGRUCell4, self).__init__()
self.trg_embeder = Embedding(100, 32)
self.output_layer = Linear(32, 32)
self.decoder_cell = GRUCell(input_size=32, hidden_size=32)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder, inits=self.decoder_cell.get_initial_states(encoder_output), max_step_num=10
)
return outputs[0]
class ModelGRUCell5(paddle.nn.Layer):
"""
GRUCell model1
"""
def __init__(self):
"""
initialize
"""
super(ModelGRUCell5, self).__init__()
self.trg_embeder = Embedding(100, 32)
self.output_layer = Linear(32, 32)
self.decoder_cell = GRUCell(input_size=32, hidden_size=32)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder,
inits=self.decoder_cell.get_initial_states(encoder_output),
output_time_major=True,
max_step_num=10,
)
return outputs[0]
class ModelGRUCell6(paddle.nn.Layer):
"""
GRUCell model2
"""
def __init__(self):
"""
initialize
"""
super(ModelGRUCell6, self).__init__()
self.trg_embeder = Embedding(100, 32)
self.output_layer = Linear(32, 32)
self.decoder_cell = GRUCell(input_size=32, hidden_size=32)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder,
inits=self.decoder_cell.get_initial_states(encoder_output),
is_test=True,
max_step_num=10,
)
return outputs[0]
class ModelGRUCell7(paddle.nn.Layer):
"""
GRUCell model3
"""
def __init__(self):
"""
initialize
"""
super(ModelGRUCell7, self).__init__()
self.trg_embeder = Embedding(100, 32)
self.output_layer = Linear(32, 32)
self.decoder_cell = GRUCell(input_size=32, hidden_size=32)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder,
inits=self.decoder_cell.get_initial_states(encoder_output),
impute_finished=True,
max_step_num=10,
)
return outputs[0]
class ModelGRUCell8(paddle.nn.Layer):
"""
GRUCell model4
"""
def __init__(self):
"""
initialize
"""
super(ModelGRUCell8, self).__init__()
self.trg_embeder = Embedding(100, 32)
self.output_layer = Linear(32, 32)
self.decoder_cell = GRUCell(input_size=32, hidden_size=32)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder,
inits=self.decoder_cell.get_initial_states(encoder_output),
return_length=True,
max_step_num=10,
)
return outputs[2]
class ModelLSTMCell1(paddle.nn.Layer):
"""
LSTMCell model
"""
def __init__(self):
"""
initialize
"""
super(ModelLSTMCell1, self).__init__()
self.trg_embeder = Embedding(100, 32)
self.output_layer = Linear(32, 32)
self.decoder_cell = LSTMCell(input_size=32, hidden_size=32)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder, inits=self.decoder_cell.get_initial_states(encoder_output), max_step_num=10
)
return outputs[0]
class ModelLSTMCell2(paddle.nn.Layer):
"""
LSTMCell model1
"""
def __init__(self):
"""
initialize
"""
super(ModelLSTMCell2, self).__init__()
self.trg_embeder = Embedding(100, 16)
self.output_layer = Linear(16, 16)
self.decoder_cell = LSTMCell(input_size=16, hidden_size=16)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 4, 16), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder, inits=self.decoder_cell.get_initial_states(encoder_output), max_step_num=10
)
return outputs[0]
class ModelLSTMCell3(paddle.nn.Layer):
"""
LSTMCell model2
"""
def __init__(self):
"""
initialize
"""
super(ModelLSTMCell3, self).__init__()
self.trg_embeder = Embedding(100, 32)
self.output_layer = Linear(32, 32)
self.decoder_cell = LSTMCell(input_size=32, hidden_size=32)
self.decoder = BeamSearchDecoder(
self.decoder_cell,
start_token=0,
end_token=1,
beam_size=4,
embedding_fn=self.trg_embeder,
output_fn=self.output_layer,
)
def forward(self):
"""
forward
"""
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
outputs = dynamic_decode(
decoder=self.decoder, inits=self.decoder_cell.get_initial_states(encoder_output), max_step_num=5
)
return outputs[0]
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode0():
"""
GRUCell
"""
# paddle.seed(33)
m = ModelGRUCell4()
a = paddle.load("model/model_grucell4")
m.set_state_dict(a)
res = [
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode1():
"""
change the decoder cell to LSTMCell
"""
m = ModelLSTMCell1()
a = paddle.load("model/model_lstmcell1")
m.set_state_dict(a)
res = [
[
[4, 4, 22, 4],
[4, 4, 4, 4],
[30, 20, 20, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 20],
],
[
[4, 4, 22, 4],
[4, 4, 4, 4],
[30, 20, 20, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 20],
],
[
[4, 4, 22, 4],
[4, 4, 4, 4],
[30, 20, 20, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 20],
],
[
[4, 4, 22, 4],
[4, 4, 4, 4],
[30, 20, 20, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 30],
[30, 30, 30, 20],
],
]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode2():
"""
change the input size
"""
m = ModelLSTMCell2()
a = paddle.load("model/model_lstmcell2")
m.set_state_dict(a)
res = [
[
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 9, 9],
[4, 9, 9, 4],
],
[
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 9, 9],
[4, 9, 9, 4],
],
[
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 9, 9],
[4, 9, 9, 4],
],
[
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 4, 4],
[4, 4, 9, 9],
[4, 9, 9, 4],
],
]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode3():
"""
change the max_step_num
"""
m = ModelLSTMCell3()
a = paddle.load("model/model_lstmcell3")
m.set_state_dict(a)
res = [
[[4, 4, 22, 4], [4, 4, 4, 4], [30, 20, 20, 30], [30, 30, 30, 30], [30, 30, 30, 30], [30, 30, 30, 20]],
[[4, 4, 22, 4], [4, 4, 4, 4], [30, 20, 20, 30], [30, 30, 30, 30], [30, 30, 30, 30], [30, 30, 30, 20]],
[[4, 4, 22, 4], [4, 4, 4, 4], [30, 20, 20, 30], [30, 30, 30, 30], [30, 30, 30, 30], [30, 30, 30, 20]],
[[4, 4, 22, 4], [4, 4, 4, 4], [30, 20, 20, 30], [30, 30, 30, 30], [30, 30, 30, 30], [30, 30, 30, 20]],
]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode4():
"""
set the output_time_major True
"""
m = ModelGRUCell5()
a = paddle.load("model/model_grucell5")
m.set_state_dict(a)
res = [
[[23, 23, 23, 23], [23, 23, 23, 23], [23, 23, 23, 23], [23, 23, 23, 23]],
[[9, 23, 9, 9], [9, 23, 9, 9], [9, 23, 9, 9], [9, 23, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9], [9, 9, 9, 9]],
[[9, 9, 23, 27], [9, 9, 23, 27], [9, 9, 23, 27], [9, 9, 23, 27]],
]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode5():
"""
set the is_test True
"""
m = ModelGRUCell6()
a = paddle.load("model/model_grucell6")
m.set_state_dict(a)
res = [
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode6():
"""
set the impute_finished True
"""
m = ModelGRUCell7()
a = paddle.load("model/model_grucell7")
m.set_state_dict(a)
res = [
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
[
[23, 23, 23, 23],
[9, 23, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 9, 9],
[9, 9, 23, 27],
],
]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_parameters
def test_dynamic_decode7():
"""
set the return_length True
"""
m = ModelGRUCell8()
a = paddle.load("model/model_grucell8")
m.set_state_dict(a)
res = [[11, 11, 11, 11], [11, 11, 11, 11], [11, 11, 11, 11], [11, 11, 11, 11]]
compare(m().numpy(), res)
@pytest.mark.api_nn_dynamic_decode_exception
def test_dynamic_decode10():
"""
Decoder type error
"""
decoder_cell = LSTMCell(input_size=32, hidden_size=32)
output_layer = TransformerDecoderLayer(32, 2, 128)
decoder = TransformerDecoder(output_layer, 2)
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
try:
dynamic_decode(decoder=decoder, inits=decoder_cell.get_initial_states(encoder_output), max_step_num=10)
except Exception as e:
# print(e)
if "object has no attribute 'initialize'" in e.args[0]:
pass
else:
raise Exception
@pytest.mark.skip(reason="RD代码异常改变,此Case会报错,暂时跳过")
@pytest.mark.api_nn_dynamic_decode_exception
def test_dynamic_decode11():
"""
No parameters passed to inits
"""
paddle.seed(33)
trg_embeder = Embedding(100, 32)
output_layer = Linear(32, 32)
decoder_cell = GRUCell(input_size=32, hidden_size=32)
decoder = BeamSearchDecoder(
decoder_cell, start_token=0, end_token=1, beam_size=4, embedding_fn=trg_embeder, output_fn=output_layer
)
try:
dynamic_decode(decoder=decoder, max_step_num=5)
except Exception as e:
# print(e)
error = "'NoneType' object has no attribute 'dtype'"
if error in e.args[0]:
pass
else:
raise Exception
@pytest.mark.skip(reason="RD代码异常改变,此Case会报错,暂时跳过")
@pytest.mark.api_nn_dynamic_decode_exception
def test_dynamic_decode12():
"""
the size of inits mismatch the size of the decoder
"""
paddle.seed(33)
trg_embeder = Embedding(100, 32)
output_layer = Linear(32, 32)
decoder_cell = LSTMCell(input_size=32, hidden_size=32)
decoder = BeamSearchDecoder(
decoder_cell, start_token=0, end_token=1, beam_size=4, embedding_fn=trg_embeder, output_fn=output_layer
)
encoder_output = paddle.ones((4, 8, 32), dtype=paddle.get_default_dtype())
decoder_initial_states = [
decoder_cell.get_initial_states(encoder_output, shape=[16]),
decoder_cell.get_initial_states(encoder_output, shape=[16]),
]
try:
dynamic_decode(decoder=decoder, inits=decoder_initial_states, max_step_num=5)
except Exception as e:
if "[operator < matmul_v2 > error]" in e.args[0]:
pass
else:
raise Exception
|
[
"noreply@github.com"
] |
noreply@github.com
|
0a4f0d71af479b78c4d2993b8c4a84ed458e3ae1
|
2c886cc64c9c7ff59d02f8637c1e765e7911f079
|
/aarms/data/msd/echonest.py
|
a760b9ae3af8740258d48c5fb7f8a22a0a3d4215
|
[
"MIT"
] |
permissive
|
eldrin/aarms
|
8c6b0a095fa0bc69803af933d4bcc0a28fb0a7e1
|
bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d
|
refs/heads/master
| 2023-04-03T23:56:56.516979
| 2021-03-30T20:22:52
| 2021-03-30T20:22:52
| 252,807,989
| 0
| 0
|
MIT
| 2021-03-30T20:22:52
| 2020-04-03T18:22:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
from os.path import join
import csv
from scipy import sparse as sp
import sqlite3
from tqdm import tqdm
N_INTERACTIONS = 48373586
def load_echonest(path, verbose=False):
"""
"""
with open(join(path, 'train_triplets.txt'), 'r') as f:
users = {}
items = {}
I, J, V = [], [], []
with tqdm(total=N_INTERACTIONS, ncols=80, disable=not verbose) as prog:
for uid, sid, cnt in csv.reader(f, delimiter='\t'):
if uid not in users:
users[uid] = len(users)
if sid not in items:
items[sid] = len(items)
I.append(users[uid])
J.append(items[sid])
V.append(float(cnt))
prog.update()
X = sp.coo_matrix((V, (I, J)), shape=(len(users), len(items))).tocsr()
return {
'user_song': X,
'users': users,
'items': items
}
def load_echonest_from_sqlitedb(db_file):
"""
"""
with sqlite3.connect(db_file) as conn:
c = conn.cursor()
I, J, V = [], [], []
for u, i, v in c.execute('SELECT * FROM user_song'):
I.append(u)
J.append(i)
V.append(v)
users = [r[0] for r in c.execute('SELECT user FROM users')]
songs = [r[0] for r in c.execute('SELECT song FROM songs')]
# convert to CSR matrix
X = sp.coo_matrix((V, (I, J)), shape=(len(users), len(songs)))
X = X.tocsr()
return {
'user_song': X,
'users': users,
'songs': songs
}
|
[
"jaehun.j.kim@gmail.com"
] |
jaehun.j.kim@gmail.com
|
3add1f213eb7b59613b0794ec7004fc7996b804b
|
4ea06addb40da22573bbfb4a0253406b564ae2cd
|
/test38Simp.py
|
137aa3fb83a348e62d47974376e5c4c3d9cc0113
|
[] |
no_license
|
AldyColares/Projetos_MNii
|
5eff276daf7f7139b8875fb20bfa405af44639a9
|
43dc45cb2a7890837257f36934d0d32b5e40fc67
|
refs/heads/master
| 2016-09-11T05:48:43.756753
| 2014-03-21T14:50:45
| 2014-03-21T14:50:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
import re
arquivo = open("arquivo1.txt")
m = int(arquivo.readline().rstrip('\n'))
txt = arquivo.read()
print "grau =",m
print "\nxi\tf(xi)"
print txt
dados = map(float, re.split('\t|\n',txt))
arquivo.close()
a = dados[0]
b = dados[m*2]
fx0 = dados[1]
fxm = dados[m*2+1]
h = (b - a)/m
L = range(m+1)
i=1
j=0
S1=0
S2=0
k=1
while ( i <= m*2+1 ):
L[j] = dados[i]
i = i+2
j = j+1
while(k<m):
if int(k) % 3 == 0:
S1 = S1 + L[k]
else:
S2 = S2 + L[k]
k = k+1
I = (3*h/8)*(fx0 + fxm + 3*S2 + 2*S1)
print "\na =",a
print "b =",b
print "h =",h
print "f(x0) =",fx0
print "f(xm) =",fxm
print "Somatorio de impar =",S2
print "Somatorio de par =",S1
print "\nI =",I
|
[
"dyego@alu.ufc.br"
] |
dyego@alu.ufc.br
|
3f1b0191d31826c1fdcf7c016004635c307f9ca8
|
17659bdaf60e799941c5d7863e08d1a5d2308382
|
/src/scenic/simulators/webots/mars/__init__.py
|
baaff9c834c5aab9c4f29557045398f9689e1813
|
[
"BSD-3-Clause"
] |
permissive
|
cahartsell/Scenic
|
60a21fc95ea29629cc8d753feaed5589052ff19f
|
2e7979011aef426108687947668d9ba6f5439136
|
refs/heads/master
| 2023-01-11T07:58:05.869681
| 2020-11-09T20:25:58
| 2020-11-09T20:25:58
| 283,645,281
| 0
| 0
|
NOASSERTION
| 2020-07-30T02:02:01
| 2020-07-30T02:02:01
| null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
"""World model for a simple Mars rover example in Webots.
.. raw:: html
<h2>Submodules</h2>
.. autosummary::
:toctree: _autosummary
model
"""
|
[
"dfremont@ucsc.edu"
] |
dfremont@ucsc.edu
|
0fed34a34be9f0c7a478742d654290d73259c7fd
|
510765d4cc0bbb8e16a31acef5a619abae9cd736
|
/SML_project1_6.py
|
07ffb9ad85587f51947ccb985aebfacf878b605d
|
[] |
no_license
|
LzyloveRila/twitter-Authorship-attribution
|
e9c6435465bdc2076ffdc774e1a270537ec54356
|
7c062abd22633016fe5721ec7acc88a1a93aaf89
|
refs/heads/master
| 2020-07-23T10:09:42.192542
| 2019-09-16T10:11:37
| 2019-09-16T10:11:37
| 207,523,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,715
|
py
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
import nltk
from nltk.tokenize import word_tokenize
from nltk.tokenize import TweetTokenizer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
"""-----------------------------------------------------"""
# f=open('preprocessing_havestopword_part.txt')
f=open('lemmer_PosTag.txt')
Trainning_set = f.readlines()
tweets=[]
label=[]
for line in Trainning_set:
tweets.append(line.split("\t")[1])
label.append(line.split("\t")[0])
X_train, X_test, Y_train, Y_test = train_test_split(np.array(tweets), label, test_size=0.05, random_state=90051)
sample_split= "Training set has {} instances. Test set has {} instances.".format(X_train.shape[0], X_test.shape[0])
def my_tokenize(s):
tknzr = TweetTokenizer()
return tknzr.tokenize(s)
#return nltk.word_tokenize(s)
count_vect = CountVectorizer(tokenizer=my_tokenize,lowercase=False)
X_train_counts = count_vect.fit_transform(X_train)
X_train_counts_shape = "X_train_counts shape:",X_train_counts.shape
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
text_clf = Pipeline([('vect', CountVectorizer(tokenizer=my_tokenize)),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', penalty='l2',alpha=1e-4,
random_state=42,max_iter=20, tol=None)),])
text_clf.fit(X_train, Y_train)
predicted = text_clf.predict(X_test)
accuracy = np.mean(predicted == Y_test)
print(accuracy)
# predict test data
f2=open('preprocess_lemm_test_postag.txt')
predict = []
predict = f2.readlines()
print(len(predict))
predicted = text_clf.predict(predict)
f2.close()
#output
f=open('1_6.txt','w')
for i in range(len(predicted)):
f.write(str(i)+","+str(predicted[i]))
f.write('\n')
f.close()
f1 = open('record1_4.txt','w')
f1.write("Training1: Preprocess:nostemmer,postag,twitter token; Feature:countervectorizer+tfidf"+
"Loss:hinge, max_iter:20, set_split:0.05")
# f1.write(sample_split)
# f1.write(X_train_counts_shape)
f1.write(str(accuracy))
# f1.write("predict length:",len(predict))
f1.close()
# # #save model to disk
# import pickle
# file_name = "BOW SGD1.sav"
# pickle.dump(text_clf,open(file_name,'wb'),protocol=4)
# # load the model from disk
# loaded_model = pickle.load(open(filename, 'rb'))
# result = loaded_model.score(X_test, Y_test)
# print(result)
|
[
"noreply@github.com"
] |
noreply@github.com
|
10a68c431ed91e2b2ac04ec4a3f1b21f88858fba
|
4a4e59ee97112c69412f61ccee0c885bc8230834
|
/ssfunction.py
|
79f93e516ac96e000cf91f25b49f9de144c0189e
|
[] |
no_license
|
harshantil/Firstpython
|
f11c846aa7b80ac5deababa8bf77efc49e732006
|
7f93cbd3013126c48def448d511c652c619c87d9
|
refs/heads/main
| 2023-07-06T00:16:46.784303
| 2021-08-06T11:42:22
| 2021-08-06T11:42:22
| 377,393,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import time
from selenium import webdriver
from pynput.keyboard import *
def browser(driver):
driver = webdriver.Chrome(r"C:\Users\harsh\Downloads\chromedriver_win32\chromedriver.exe")
url = "https://accounts.google.com/signin/v2/identifie"
driver.get(url) # Going to Url
driver.maximize_window()
signin_user = driver.find_element_by_name("identifier")
signin_user.clear()
signin_user.send_keys("harshantil")
kb = Controller()
kb.press(Key.enter)
kb.release(Key.enter)
signin_pass = driver.find_element_by_name("password")
signin_pass.clear()
signin_pass.send_keys("12345678")
def screenshot(d):
folder =r"C:\\Users\\harsh\\Desktop\\testing\\Screenshot\\"
time_string = time.asctime().replace(":",".")
file_name = folder + time_string + ".png"
d.get_screenshot_as_file(file_name)
|
[
"harshantil@gmail.com"
] |
harshantil@gmail.com
|
be62c7f3c5cef47b942b7cd5168fccf4f58c10c0
|
6650b65399aed93cfbc1abc55f2160e3d911b069
|
/noun_generator.py
|
b1507100ae448c1b4cc5296d777a9c6c38ef43d7
|
[] |
no_license
|
Simon198/german_noun_generator_bot
|
832c3e1d80ae04e0bfa1a2d4e171184204ab48c1
|
1eb8368514fdd8c52a17def2f944de22dcdbe950
|
refs/heads/main
| 2023-02-05T06:21:57.560060
| 2020-12-24T13:19:21
| 2020-12-24T13:19:21
| 324,149,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext
from telegram import Update, Bot
import os
import random
dir_path = os.path.abspath(os.path.dirname(__file__))
with open(dir_path + '/nouns.txt', 'rb') as file:
nouns = file.read()
nouns = nouns.decode('utf-8').split('\n')
with open(dir_path + '/TOKEN.txt', 'r') as file:
token = file.read()
def welcome_message (update, context):
update.message.reply_text('Guten Tag Freund')
update.message.reply_text('Über den Befehl /generate kannst du fünf zufällig deutsche Nomen generieren.')
def generate_random_noun (update, context):
num_nouns = 5
if len(context.args) > 0:
try:
num_nouns = int(context.args[0])
except:
update.message.reply_text('Du musst eine Zahl hinter /generate eingeben')
return
random_nouns = random.sample(range(len(nouns)), num_nouns)
for i, noun_index in enumerate(random_nouns):
update.message.reply_text(str(i + 1) + ' - ' + nouns[noun_index])
def main ():
updater = Updater(token)
dp = updater.dispatcher
dp.add_handler(CommandHandler('start', welcome_message))
dp.add_handler(CommandHandler('generate', generate_random_noun))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
[
"simon.heinrich@iesy.net"
] |
simon.heinrich@iesy.net
|
337f7594697dfc64854074ccb19bdcce8234e917
|
7c5fa53b0bf3e45aabc0513f31ee17ad1233bb36
|
/traffic_generator/DragonflyLoadSingleGlobalLinkTrafficGenerator.py
|
e351dfd901bb188d2cd52e0e7dd685b6c744c00a
|
[
"MIT"
] |
permissive
|
minyee/TAGO
|
cd20587a170153871c62636ed75bbe6cbaf36655
|
9fea77cc39aa035796ab3ca52e95ebb66ffe0e7f
|
refs/heads/master
| 2022-09-18T07:00:30.525054
| 2020-06-01T00:47:57
| 2020-06-01T00:47:57
| 268,355,125
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
import TrafficGenerator, sys, os
sys.path.append('../')
import UniformGroupDragonfly
import numpy as np
class DragonflyLoadSingleGlobalLinkTrafficGenerator(TrafficGenerator.TrafficGenerator):
def __init__(self, topology):
TrafficGenerator.TrafficGenerator.__init__(self, topology)
return
def generate_traffic(self):
num_switches = self.topology.get_total_num_switches()
traffic_matrix = np.zeros((num_switches, num_switches))
num_blocks = self.topology.get_num_blocks()
switch_to_block_id_map = self.topology.get_switch_id_to_block_id_map()
block_to_switches_map = self.topology.get_block_id_to_switch_ids()
adj_matrix = self.topology.get_adjacency_matrix()
number_of_global_links = 0
for i in range(num_switches):
i_block = switch_to_block_id_map[i]
for j in range(num_switches):
j_block = switch_to_block_id_map[j]
if i_block != j_block and adj_matrix[i][j] > 0:
number_of_global_links += adj_matrix[i][j]
entry_probability = 1./number_of_global_links
for i in range(num_switches):
i_block = switch_to_block_id_map[i]
for j in range(num_switches):
j_block = switch_to_block_id_map[j]
if i_block != j_block and adj_matrix[i][j] > 0:
traffic_matrix[i][j] = adj_matrix[i][j] * entry_probability
print traffic_matrix
return traffic_matrix
def to_string(self):
return "dfly_strain_single_link"
|
[
"mt3126@columbia.edu"
] |
mt3126@columbia.edu
|
c299077986bf62f2c38ac444a12dff977c5aaf2c
|
3d46889bd80a69d665de0d61d1035b04359154d3
|
/PolymorphismSubmissionAssignment.py
|
492f5d5e74c16290541e16ac7ccac083053c587e
|
[] |
no_license
|
Kelinz74/Python-Projects
|
182e30636775a2ada5cb5f4735142df569c28fac
|
87cb6a97f394031c2ce6eeda22e1e7f5fe8d9b3e
|
refs/heads/main
| 2023-08-24T08:42:27.177603
| 2021-09-30T03:02:12
| 2021-09-30T03:02:12
| 402,674,304
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,438
|
py
|
## Parent class
class Avenger:
company = "Avengers"
name = ""
email = ""
password = ""
department = ""
    # a function for the parent class for a mission statement to be displayed with each successful login
def foundation(self):
msg = "Protecting the future: {}\n".format(self.company)
return msg
## Child class used for a user (like a customer)
class User(Avenger):
name = "Captain America"
email = "cap@gmail.com"
password = "IronManSucks@5914"
# a function for the child class login input
def getLoginInfo(self):
entry_name = input("Enter your name: ")
entry_email = input("Enter your email: ")
entry_password = input("Enter your password: ")
        # A welcome-back message is displayed if the login is successful
if (entry_email == self.email and entry_password == self.password):
print("\nWelcome back, {}".format(entry_name))
company = User()
print(company.foundation())
        # An error message is displayed if the login is unsuccessful
else:
print("The password or email is incorrect.")
customer = User()
customer.getLoginInfo()
## child class used for an employee log in.
class Employee(Avenger):
name = "Stephen Strange"
email = "drstrange@gmail.com"
title = "Sorcerer Supreme"
department = "Time"
pin_number = "1130"
# a function for the child class login input
def getLoginInfo(self):
entry_name = input("Enter your name: ")
entry_email = input("Enter your email: ")
entry_pin = input("Enter your pin: ")
        # A welcome-back message is displayed if the login is successful
if (entry_email == self.email and entry_pin == self.pin_number):
print("\nWelcome back, {}".format(entry_name))
company = User()
print(company.foundation())
        # An error message is displayed if the login is unsuccessful
else:
print("The pin or email is incorrect.")
manager = Employee()
manager.getLoginInfo()
## child class used for a cleaning person login (Janitorial)
class Janitorial(Avenger):
name = "Thor"
email = "heavyhammer@gmail.com"
title = "Janitor"
tools = "Mop"
pin_number = "7941"
# a function for the child class login input
def getLoginInfo(self):
entry_name = input("Enter your name: ")
entry_email = input("Enter your email: ")
entry_pin = input("Enter your pin: ")
        # A welcome-back message is displayed if the login is successful
if (entry_email == self.email and entry_pin == self.pin_number):
print("\nWelcome back, {}".format(entry_name))
company = User()
print(company.foundation())
        # An error message is displayed if the login is unsuccessful
else:
print("The pin or email is incorrect.")
janitor = Janitorial()
janitor.getLoginInfo()
# calls to each class for login input and display message if successful or unsuccessful login.
if __name__ == "__main__":
customer = User()
customer.getLoginInfo()
manager = Employee()
manager.getLoginInfo()
janitor = Janitorial()
janitor.getLoginInfo()
|
[
"noreply@github.com"
] |
noreply@github.com
|
fd79b74367b169eecee4829c8730e2662173b58b
|
3efa3a2bcdd38c27beeb967a9e99c6afc17e6e6f
|
/pipelines/pipeline_dianping.py
|
89e7dab6d480d935465d72ac2124b52a26663b5e
|
[] |
no_license
|
chocoai/integrated_crawler
|
6f266ef54d096096c71ec5bd28463393164126d1
|
5d75d2781d2adfcd6524e8a2edfeb2fb2267571b
|
refs/heads/master
| 2020-04-26T03:07:07.995759
| 2019-02-25T10:42:59
| 2019-02-25T10:42:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,512
|
py
|
# -*- coding: utf-8 -*-
import os, re
import time, datetime
import csv
import sqlite3 as sql
import ssl
import pandas as pd
from utils.general_request import *
logging.basicConfig(filename='logs/utils_pipeline_dianping.log', level=logging.WARNING,
format="%(asctime)s - %(levelname)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S %p")
TIME_INTERVAL_TO_NEXT_PAGE = 2.0
TIME_INTERVAL_TO_NEXT_CITY = 2.0
def get_city_id(csvfilename):
city_ids = dict()
url = 'http://www.dianping.com/citylist'
h = request_url(url, 'GET')
groups = h.find_all('li', class_='letter-item')
with open(csvfilename, 'w+', encoding='UTF-8', newline='') as csvfile:
csvfile.write('city_name,city_url,city_id\n')
for group in groups:
print('Now finding cities whose first-letter = ' + group.find('div', class_='oneletter').text)
city_links = group.find_all('a')
for city_link in city_links:
city = city_link.text
city_url = 'http:' + city_link.attrs['href'] + '/'
h = request_url(city_url, 'GET')
start_point = str(h).find("'cityId'")
end_point = str(h).find(", // 城市id")
city_id = str(h)[start_point + 11:end_point - 1]
csvfile.write(city + ',' + city_url + ',' + city_id + '\n')
time.sleep(TIME_INTERVAL_TO_NEXT_CITY)
return city_ids
def search_restaurant_in_city(keywords, city_id):
url = 'https://www.dianping.com/search/keyword/{}/10_{}'.format(str(city_id), keywords)
h = request_url(url)
detail_csvfile = 'data/dianping_results/raw/' + 'restaurant_details_' + keywords + '.csv'
total_number = 0
if h.find('div', class_='page') is None:
total_pages = 1
else:
total_pages = int(h.find('div', class_='page').find_all('a')[-2].attrs['data-ga-page'])
cur_page = 1
while True:
not_found_div = h.find('div', class_='not-found')
if not_found_div is None:
shoplist = h.find('div', {'id': 'shop-all-list'})
if shoplist is not None:
lis = shoplist.find_all('li')
total_number += len(lis)
with open(detail_csvfile, 'a+', encoding='UTF-8', newline='') as f:
for li in lis:
store_title = li.find('div', class_='tit').find('a').attrs['title']
store_id = li.find('div', class_='tit').find('a').attrs['data-shopid']
store_score = li.find('div', class_='comment').find('span').attrs['title']
store_comment_url = li.find('div', class_='comment').find('a').attrs['href']
store_status = li.find('span', class_='istopTrade')
if store_status is None:
line = str(city_id) + ',' + keywords + ',' + store_id + ',' + store_title + \
',' + store_score + ',' + store_comment_url + ',\n'
elif store_status.text != '歇业/关闭':
line = str(city_id) + ',' + keywords + ',' + store_id + ',' + store_title + \
',' + store_score + ',' + store_comment_url + ',歇业/关闭\n'
else:
line = str(city_id) + ',' + keywords + ',' + store_id + ',' + store_title + \
',' + store_score + ',' + store_comment_url + ',' + store_status.text + '\n'
f.write(line)
else:
print('Found {} restaurant in city_id: {}.'.format(str(0), str(city_id)))
return total_number
cur_page += 1
if cur_page <= total_pages:
time.sleep(TIME_INTERVAL_TO_NEXT_PAGE)
if cur_page == 2:
url = url + '/p' + str(cur_page)
else:
url = url.replace('/p' + str(cur_page - 1), '/p' + str(cur_page))
h = request_url(url)
else:
print('Found {} restaurant in city_id: {}.'.format(str(total_number), str(city_id)))
return total_number
def start_crawler(keyword, city_id_list, start_city_id):
for city_id in city_id_list:
if city_id >= start_city_id:
total_number_in_city = search_restaurant_in_city(keyword, city_id)
print('Total results in city: {} == {}.'.format(str(city_id), str(total_number_in_city)))
time.sleep(2.0)
print(requests.get(url_to_del_whitelist + PROXY.split(':')[0]).text)
def search_keyword_in_dianping(keyword, start_city_id=1):
# If using baidu map source:
# bdmap_result_csvfile = 'data/baidumap_results/{}_20190220.csv'.format(keyword)
df_nierson = pd.read_csv('data/dianping_results/nierson_city_list.csv', encoding='gbk')
city_id_list = sorted(list(df_nierson.meituan_city_id))
start_crawler(keyword, city_id_list, start_city_id)
print('Finished crawling info of: ', keyword)
def clean_csv_results(csvfilename):
try:
df = pd.read_csv(csvfilename,
names=['city_id', 'keyword', 'dianping_shop_id', 'shop_title', 'stars', 'shop_url', 'state'],
encoding='UTF-8')
except UnicodeDecodeError as e1:
df = pd.read_csv(csvfilename,
names=['city_id', 'keyword', 'dianping_shop_id', 'shop_title', 'stars', 'shop_url', 'state'],
encoding='gbk')
except Exception as e2:
print('Exception found when cleaning: ', csvfilename)
print(e2)
return
finally:
df = df.drop_duplicates(keep='first')
new_name = csvfilename.replace('raw', 'cleaned')
df.to_csv(new_name, encoding='utf-8')
print('Finished cleaning file: ' + csvfilename)
def clean_data(path='data/dianping_results/raw/'):
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
if name not in ['dianping_city_list.csv', 'nierson_city_list.csv']:
clean_csv_results(path + name)
print('Finished cleaning data.')
def merge_cleaned_data(folder_path='dianping_results/cleaned/'):
dfs = []
for root, dirs, files in os.walk(folder_path, topdown=False):
for name in files:
df = pd.read_csv(folder_path + name, encoding='gbk')
dfs.append(df)
df = pd.concat(dfs)
df.to_csv('dianping_cleaned_in_one.csv', encoding='gbk')
|
[
"kevin_jfzhu@163.com"
] |
kevin_jfzhu@163.com
|
5043e33106daca70e5c1091684a4d3b45a1fbf1b
|
2d15135e3559c65374b38abd47e9289c41c042b2
|
/server.py
|
e4d42f02581eeac5a6e90e1c13c8b659bfeec675
|
[] |
no_license
|
diana-xie/btc_prediction
|
33e418bc35f33835fd5439ddef3eb3c8fd92a4d0
|
bbecc4c45b9ab15a925e704357f689736de93db2
|
refs/heads/master
| 2022-12-12T10:55:44.348461
| 2020-09-10T00:57:57
| 2020-09-10T00:57:57
| 294,164,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
""" Runs the endpoints for BTC predict, train, unit tests """
import tensorflow as tf
from flask import Flask, jsonify, request
import os
import logging
import pkg_resources
import pandas as pd
from tests.test_conf import test_conf
from tests.test_preprocessing_train import test_preprocessing_train
from tests.test_model_drift import test_model_drift
from train import train_model
from utils import fix_path, process_request
# remove tf warning messages
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
app = Flask(__name__)
port = int(os.environ.get("PORT", 5000))
@app.route('/', methods=['GET'])
def server_is_up():
# print("success")
return 'API is up.'
@app.route('/train', methods=['POST']) # POST
def train_api():
observation = request.json
mae = train_model(observation)
return 'Model has been trained and saved. MAE is {}'.format(mae)
@app.route('/predict', methods=['POST']) # POST
def predict_api():
try:
model = pd.read_pickle(os.path.join(fix_path(), "models/model.pkl"))
logging.info("RFregressor version: ", pkg_resources.get_distribution("scikit-learn"))
# observation = observation.encode() # this code is for scenario where data is encoded as str in POST
# observation = pickle.loads(base64.b64decode(observation))
# request = open('request.json', 'rb') # todo - comment out if not testing locally
observation = request.json
observation = process_request(observation=observation)
pred = model.get_prediction(observation)
return jsonify({"bitcoin prediction": str(pred)})
except Exception as ex:
logging.error("No model was found, so run /train")
""" unit tests"""
@app.route('/test_conf', methods=['GET'])
def unit_tests_conf():
test_conf()
return 'Successfully ran conf test.'
@app.route('/test_preprocess_train', methods=['GET'])
def unit_tests_preprocess():
test_preprocessing_train()
return 'Successfully ran preprocessing and train tests.'
@app.route('/test_drift', methods=['GET'])
def unit_tests_drift():
msg = test_model_drift()
return msg
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=port)
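# A rough client-side sketch (assumes the server is reachable on localhost:5000 and
# that /train and /predict receive the JSON payloads produced by the data pipeline):
#
#   import requests
#   base = "http://localhost:5000"
#   requests.post(f"{base}/train", json=training_observation)
#   resp = requests.post(f"{base}/predict", json=observation)
#   print(resp.json())  # {"bitcoin prediction": "..."}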
|
[
"noreply@github.com"
] |
noreply@github.com
|
ca4d478fbd596a33844375f314113fc89a94ff1e
|
d674009298cb8ecfeb97ed0dcac5820a1a34c469
|
/catalog/migrations/0002_auto_20201113_1659.py
|
f657f39b1fcef96fe2f0ec45a09a1258f1c3781e
|
[] |
no_license
|
pavelpyn/salon
|
e21eee434a901d7168aa20493690701ba6959611
|
7b88f77caf8d948e81f81b51277f2102d6897d09
|
refs/heads/main
| 2023-01-12T22:28:25.236529
| 2020-11-18T09:28:38
| 2020-11-18T09:28:38
| 313,882,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,041
|
py
|
# Generated by Django 3.1.2 on 2020-11-13 13:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='service',
name='price_1',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 25 до 40см)'),
),
migrations.AlterField(
model_name='service',
name='price_2',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 25 до 40см)'),
),
migrations.AlterField(
model_name='service',
name='price_3',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 25 до 40см)'),
),
migrations.AlterField(
model_name='service',
name='price_4',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 40 и выше)'),
),
migrations.AlterField(
model_name='service',
name='price_man_all',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Мужская стрижка, стоимость работы'),
),
migrations.AlterField(
model_name='service',
name='price_man_material',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Мужская стрижка, расходные материалы'),
),
migrations.AlterField(
model_name='service',
name='price_nm_1',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов1'),
),
migrations.AlterField(
model_name='service',
name='price_nm_2',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов2'),
),
migrations.AlterField(
model_name='service',
name='price_nm_3',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов3'),
),
migrations.AlterField(
model_name='service',
name='price_nm_4',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов4'),
),
migrations.AlterField(
model_name='service',
name='price_work',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Мужская стрижка, стоимость услуги'),
),
]
|
[
"pavelpyn@gmail.com"
] |
pavelpyn@gmail.com
|
404ccc4de81309e69083b0b19bb3d53830a09a20
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/list_instances_datastore_result.py
|
34f5b1f20917eabd5ea29c17543d8217b496429f
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,366
|
py
|
# coding: utf-8
import re
import six
class ListInstancesDatastoreResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'type': 'str',
'version': 'str'
}
attribute_map = {
'type': 'type',
'version': 'version'
}
def __init__(self, type=None, version=None):
"""ListInstancesDatastoreResult - a model defined in huaweicloud sdk"""
self._type = None
self._version = None
self.discriminator = None
self.type = type
self.version = version
@property
def type(self):
"""Gets the type of this ListInstancesDatastoreResult.
        The database engine.
:return: The type of this ListInstancesDatastoreResult.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ListInstancesDatastoreResult.
        The database engine.
:param type: The type of this ListInstancesDatastoreResult.
:type: str
"""
self._type = type
@property
def version(self):
"""Gets the version of this ListInstancesDatastoreResult.
        The database version.
:return: The version of this ListInstancesDatastoreResult.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this ListInstancesDatastoreResult.
        The database version.
:param version: The version of this ListInstancesDatastoreResult.
:type: str
"""
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListInstancesDatastoreResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
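# Example (illustrative only; the field values are made up):
#
#   result = ListInstancesDatastoreResult(type="cassandra", version="3.11")
#   result.to_dict()   # {'type': 'cassandra', 'version': '3.11'}
#   result == ListInstancesDatastoreResult(type="cassandra", version="3.11")  # True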
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
ae7fc20183651cc33c0675dd8b8869440efb4d14
|
2e4bd74698ce47c5f81699076bd367407a1e3a72
|
/lists/tests.py
|
e40f2691d63b115fbc66d5969035aef9ed67542b
|
[] |
no_license
|
Onwughara-CK/obey_the_testing_goat
|
2e0e1d2f1b828b69e4eb638e4a8f18323e6a3abb
|
eaedc4203acb9b9ea461c9970e79a10a53e622ce
|
refs/heads/master
| 2022-11-30T02:55:30.345380
| 2020-08-17T21:02:41
| 2020-08-17T21:02:41
| 287,971,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
from django.test import TestCase
from django.urls import resolve
from django.http import HttpRequest
from .views import home_page
class HomePageTest(TestCase):
def test_root_url_resolves_to_home_page_view(self):
self.assertEqual(resolve("/").func, home_page)
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
html = response.content.decode('utf8')
self.assertTrue(html.startswith('<html>'))
self.assertIn('<title>To-Do lists</title>', html)
        self.assertTrue(html.endswith('</html>'))
|
[
"kelechicollins.93@gmail.com"
] |
kelechicollins.93@gmail.com
|
2d883d197753ff27c2d2713d689c61047b3dd2eb
|
517693716ff4d3f642dda194767cbc03bb37cd1b
|
/src/data_functions.py
|
5d48879422f5b92f52c3c1c5dbc7b4a6d8dd580a
|
[] |
no_license
|
bradley-p/Solar_Energy_Forecasting
|
3cb1951507a1336ee0cf65133cfd0b861ee7454c
|
22317b2fdf51e3d973b32ceef42bc6e68754f6cc
|
refs/heads/main
| 2023-04-17T22:52:28.000990
| 2021-05-04T17:03:18
| 2021-05-04T17:03:18
| 348,751,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
import numpy as np
import astral
from astral import sun
import pytz
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
###
# File contains methods useful for curating data
# helps to clean-up the data curating notebook
# provides method that computes elevation, azimuth, and zenith using astral package
##
def plotRegression(truth, pred):
plt.figure(figsize=(10,10))
plt.scatter(truth, pred)
plt.grid()
plt.xlabel("Truth")
plt.ylabel("Predicted")
plt.title("Truth Plotted against actual value")
plt.plot([min(truth),max(truth)], [min(truth),max(truth)], 'r')
plt.show()
def computeAverageError(pred, y):
err = []
for i in range(len(pred)):
err.append(abs((y[i] - pred[i])/(y[i] + 1e-6)))
return sum(err)/ len(err)
class LoganAstral:
def __init__(self):
#going to use these variables a lot
self.MST = pytz.timezone('US/Mountain')
self.logan = astral.LocationInfo(name='Logan, UT', region='US/Mountain', timezone=self.MST, latitude=41.7452, longitude=-111.8097)
self.observer = self.logan.observer
# Astral expects UTC time. We are assuming input is in MST
def timeToUTC(self, mstDT):
return self.MST.normalize(self.MST.localize(mstDT)).astimezone(pytz.utc)
    # computes elevation, azimuth, and zenith for a given MST datetime
def computeElAzZe(self, dt):
utcDT = self.timeToUTC(dt)
elevation = sun.elevation(self.observer, utcDT)
azimuth = sun.azimuth(self.observer, utcDT)
zenith = sun.zenith(self.observer, utcDT)
return (elevation, azimuth, zenith)
if __name__=='__main__':
year = 2021
month = 3
day = 26
hour = 7
minutes = 19
seconds = 0
dt = datetime(year, month, day, hour, minutes, seconds)
lat = 41.7452
lon = -111.8097
MST = pytz.timezone('US/Mountain')
logan = astral.LocationInfo(name='Logan, UT', timezone=MST, latitude=lat, longitude=lon)
# this is how to convert from local time to UTC, which astral expects
utcdt = MST.normalize(MST.localize(dt)).astimezone(pytz.utc)
print(sun.zenith_and_azimuth(logan.observer, utcdt))
print(sun.elevation(logan.observer, utcdt))
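    # The same calculation via the LoganAstral helper defined above (sketch; dt is the
    # MST datetime built a few lines earlier):
    helper = LoganAstral()
    print(helper.computeElAzZe(dt))  # (elevation, azimuth, zenith)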
|
[
"70186602+bradley-p@users.noreply.github.com"
] |
70186602+bradley-p@users.noreply.github.com
|
9aac217d250bec6154a2df018e9272e61fac82ab
|
1cee80627744f448efea6fac3c91c471e6b1cba9
|
/resott/asgi.py
|
b78850c84f3b91e7246d72e948d8cb3ffaf41c63
|
[] |
no_license
|
AJ10-1/resott
|
47cf314b47e8352ab9184785f36986a1915101e7
|
9d1839d7459943eec2cf365490836b4ce78129e6
|
refs/heads/master
| 2023-09-01T07:05:57.440647
| 2021-11-03T08:47:12
| 2021-11-03T08:47:12
| 424,101,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
ASGI config for resott project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'resott.settings')
application = get_asgi_application()
|
[
"ayushjaiss@gmail.com"
] |
ayushjaiss@gmail.com
|
f0b5d8049387f82fdc10423ed90621cbe0c3bdef
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/dgu/src/dataset.py
|
b0e7e7d67e9e558b44bf62623dcbbab8f34c71a8
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 21,879
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
dataset used in Bert finetune and evaluation.
"""
import json
import os
from typing import List
import numpy as np
# The input data begins with '[CLS]' and uses '[SEP]' to split the conversation
# content (previous part, current part, following part, etc.). If a split part
# contains multiple conversations, 'INNER_SEP' is used to split them further.
INNER_SEP = '[unused0]'
class Dataset():
""" Dataset base class """
def __init__(self):
pass
def __getitem__(self, idx):
raise NotImplementedError("'{}' not implement in class " \
"{}".format('__getitem__', self.__class__.__name__))
def __len__(self):
raise NotImplementedError("'{}' not implement in class " \
"{}".format('__len__', self.__class__.__name__))
def get_label_map(label_list):
""" Create label maps """
label_map = {}
for (i, l) in enumerate(label_list):
label_map[l] = i
return label_map
class UDCv1(Dataset):
"""
    The UDCv1 dataset is used in the Dialogue Response Selection task.
The source dataset is UDCv1(Ubuntu Dialogue Corpus v1.0). See detail at
http://dataset.cs.mcgill.ca/ubuntu-corpus-1.0/
"""
MAX_LEN_OF_RESPONSE = 60
LABEL_MAP = get_label_map(['0', '1'])
def __init__(self, data_dir, mode='train', label_map_config=None):
super(UDCv1, self).__init__()
self._data_dir = data_dir
self._mode = mode
self.read_data()
self.label_map = None
if label_map_config:
with open(label_map_config) as f:
self.label_map = json.load(f)
else:
self.label_map = None
#read data from file
def read_data(self):
"""read data from file"""
if self._mode == 'train':
data_path = os.path.join(self._data_dir, 'train.txt')
elif self._mode == 'dev':
data_path = os.path.join(self._data_dir, 'dev.txt-small')
elif self._mode == 'test':
data_path = os.path.join(self._data_dir, 'test.txt')
self.data = []
with open(data_path, 'r', encoding='utf8') as fin:
for line in fin:
if not line:
continue
arr = line.rstrip('\n').split('\t')
if len(arr) < 3:
print('Data format error: %s' % '\t'.join(arr))
print(
'Data row contains at least three parts: label\tconversation1\t.....\tresponse.'
)
continue
label = arr[0]
text_a = arr[1:-1]
text_b = arr[-1]
self.data.append([label, text_a, text_b])
@classmethod
def get_label(cls, label):
return cls.LABEL_MAP[label]
@classmethod
def num_classes(cls):
return len(cls.LABEL_MAP)
@classmethod
def convert_example(cls, example, tokenizer, max_seq_length=512):
""" Convert a glue example into necessary features. """
def _truncate_and_concat(text_a: List[str], text_b: str, tokenizer, max_seq_length):
tokens_b = tokenizer.tokenize(text_b)
tokens_b = tokens_b[:min(cls.MAX_LEN_OF_RESPONSE, len(tokens_b))]
tokens_a = []
for text in text_a:
tokens_a.extend(tokenizer.tokenize(text))
tokens_a.append(INNER_SEP)
tokens_a = tokens_a[:-1]
if len(tokens_a) > max_seq_length - len(tokens_b) - 3:
tokens_a = tokens_a[len(tokens_a) - max_seq_length + len(tokens_b) + 3:]
tokens, segment_ids = [], []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
return input_ids, input_mask, segment_ids
label, text_a, text_b = example
label = np.array([cls.get_label(label)], dtype='int64')
input_ids, input_mask, segment_ids = _truncate_and_concat(text_a, text_b, tokenizer, max_seq_length)
return input_ids, input_mask, segment_ids, label
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class DSTC2(Dataset):
"""
    The DSTC2 dataset is used in the Dialogue State Tracking task.
The source dataset is DSTC2(Dialog State Tracking Challenges 2). See detail at
https://github.com/matthen/dstc
"""
LABEL_MAP = get_label_map([str(i) for i in range(217)])
def __init__(self, data_dir, mode='train'):
super(DSTC2, self).__init__()
self._data_dir = data_dir
self._mode = mode
self.read_data()
def read_data(self):
"""read data from file"""
def _concat_dialogues(examples):
"""concat multi turns dialogues"""
new_examples = []
max_turns = 20
example_len = len(examples)
for i in range(example_len):
multi_turns = examples[max(i - max_turns, 0):i + 1]
new_qa = '\1'.join([example[0] for example in multi_turns])
new_examples.append((new_qa.split('\1'), examples[i][1]))
return new_examples
if self._mode == 'train':
data_path = os.path.join(self._data_dir, 'train.txt')
elif self._mode == 'dev':
data_path = os.path.join(self._data_dir, 'dev.txt')
elif self._mode == 'test':
data_path = os.path.join(self._data_dir, 'test.txt')
self.data = []
with open(data_path, 'r', encoding='utf8') as fin:
pre_idx = -1
examples = []
for line in fin:
if not line:
continue
arr = line.rstrip('\n').split('\t')
if len(arr) != 3:
print('Data format error: %s' % '\t'.join(arr))
print(
'Data row should contains three parts: id\tquestion\1answer\tlabel1 label2 ...'
)
continue
idx = arr[0]
qa = arr[1]
label_list = arr[2].split()
if idx != pre_idx:
if idx != 0:
examples = _concat_dialogues(examples)
self.data.extend(examples)
examples = []
pre_idx = idx
examples.append((qa, label_list))
if examples:
examples = _concat_dialogues(examples)
self.data.extend(examples)
@classmethod
def get_label(cls, label):
return cls.LABEL_MAP[label]
@classmethod
def num_classes(cls):
return len(cls.LABEL_MAP)
@classmethod
def convert_example(cls, example, tokenizer, max_seq_length=512):
""" Convert a glue example into necessary features. """
def _truncate_and_concat(texts: List[str], tokenizer, max_seq_length):
tokens = []
for text in texts:
tokens.extend(tokenizer.tokenize(text))
tokens.append(INNER_SEP)
tokens = tokens[:-1]
if len(tokens) > max_seq_length - 2:
tokens = tokens[len(tokens) - max_seq_length + 2:]
tokens_, segment_ids = [], []
tokens_.append("[CLS]")
segment_ids.append(0)
for token in tokens:
tokens_.append(token)
segment_ids.append(0)
tokens_.append("[SEP]")
segment_ids.append(0)
tokens = tokens_
input_ids = tokenizer.convert_tokens_to_ids(tokens)
return input_ids, segment_ids
texts, labels = example
input_ids, segment_ids = _truncate_and_concat(texts, tokenizer,
max_seq_length)
labels = [cls.get_label(l) for l in labels]
label = np.zeros(cls.num_classes(), dtype='int64')
for l in labels:
label[l] = 1
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
return input_ids, input_mask, segment_ids, label
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class ATIS_DSF(Dataset):
"""
    The ATIS_DSF dataset is used in the Dialogue Slot Filling task.
The source dataset is ATIS(Airline Travel Information System). See detail at
https://www.kaggle.com/siddhadev/ms-cntk-atis
"""
LABEL_MAP = get_label_map([str(i) for i in range(130)])
def __init__(self, data_dir, mode='train'):
super(ATIS_DSF, self).__init__()
self._data_dir = data_dir
self._mode = mode
self.read_data()
def read_data(self):
"""read data from file"""
if self._mode == 'train':
data_path = os.path.join(self._data_dir, 'train.txt')
elif self._mode == 'dev':
data_path = os.path.join(self._data_dir, 'dev.txt')
elif self._mode == 'test':
data_path = os.path.join(self._data_dir, 'test.txt')
self.data = []
with open(data_path, 'r', encoding='utf8') as fin:
for line in fin:
if not line:
continue
arr = line.rstrip('\n').split('\t')
if len(arr) != 2:
print('Data format error: %s' % '\t'.join(arr))
print(
'Data row should contains two parts: conversation_content\tlabel1 label2 label3.'
)
continue
text = arr[0]
label_list = arr[1].split()
self.data.append([text, label_list])
@classmethod
def get_label(cls, label):
return cls.LABEL_MAP[label]
@classmethod
def num_classes(cls):
return len(cls.LABEL_MAP)
@classmethod
def convert_example(cls, example, tokenizer, max_seq_length=512):
""" Convert a glue example into necessary features. """
text, labels = example
tokens, label_list = [], []
words = text.split()
assert len(words) == len(labels)
for word, label in zip(words, labels):
piece_words = tokenizer.tokenize(word)
tokens.extend(piece_words)
label = cls.get_label(label)
label_list.extend([label] * len(piece_words))
        if len(tokens) > max_seq_length - 2:
            # compute the cut once so tokens and labels stay aligned after truncation
            cut = len(tokens) - max_seq_length + 2
            tokens = tokens[cut:]
            label_list = label_list[cut:]
tokens_, segment_ids = [], []
tokens_.append("[CLS]")
for token in tokens:
tokens_.append(token)
tokens_.append("[SEP]")
tokens = tokens_
label_list = [0] + label_list + [0]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
label = np.array(label_list, dtype='int64')
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
return input_ids, input_mask, segment_ids, label
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class ATIS_DID(Dataset):
"""
    The ATIS_DID dataset is used in the Dialogue Intent Detection task.
The source dataset is ATIS(Airline Travel Information System). See detail at
https://www.kaggle.com/siddhadev/ms-cntk-atis
"""
LABEL_MAP = get_label_map([str(i) for i in range(26)])
def __init__(self, data_dir, mode='train'):
super(ATIS_DID, self).__init__()
self._data_dir = data_dir
self._mode = mode
self.read_data()
def read_data(self):
"""read data from file"""
if self._mode == 'train':
data_path = os.path.join(self._data_dir, 'train.txt')
elif self._mode == 'dev':
data_path = os.path.join(self._data_dir, 'dev.txt')
elif self._mode == 'test':
data_path = os.path.join(self._data_dir, 'test.txt')
self.data = []
with open(data_path, 'r', encoding='utf8') as fin:
for line in fin:
if not line:
continue
arr = line.rstrip('\n').split('\t')
if len(arr) != 2:
print('Data format error: %s' % '\t'.join(arr))
print(
'Data row should contains two parts: label\tconversation_content.'
)
continue
label = arr[0]
text = arr[1]
self.data.append([label, text])
@classmethod
def get_label(cls, label):
return cls.LABEL_MAP[label]
@classmethod
def num_classes(cls):
return len(cls.LABEL_MAP)
@classmethod
def convert_example(cls, example, tokenizer, max_seq_length=512):
""" Convert a glue example into necessary features. """
label, text = example
tokens = tokenizer.tokenize(text)
if len(tokens) > max_seq_length - 2:
tokens = tokens[len(tokens) - max_seq_length + 2:]
tokens_, segment_ids = [], []
tokens_.append("[CLS]")
for token in tokens:
tokens_.append(token)
tokens_.append("[SEP]")
tokens = tokens_
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
label = np.array([cls.get_label(label)], dtype='int64')
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
return input_ids, input_mask, segment_ids, label
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def read_da_data(data_dir, mode):
"""read data from file"""
def _concat_dialogues(examples):
"""concat multi turns dialogues"""
new_examples = []
example_len = len(examples)
for i in range(example_len):
label, caller, text = examples[i]
cur_txt = "%s : %s" % (caller, text)
pre_txt = [
"%s : %s" % (item[1], item[2])
for item in examples[max(0, i - 5):i]
]
suf_txt = [
"%s : %s" % (item[1], item[2])
for item in examples[i + 1:min(len(examples), i + 3)]
]
sample = [label, pre_txt, cur_txt, suf_txt]
new_examples.append(sample)
return new_examples
if mode == 'train':
data_path = os.path.join(data_dir, 'train.txt')
elif mode == 'dev':
data_path = os.path.join(data_dir, 'dev.txt')
elif mode == 'test':
data_path = os.path.join(data_dir, 'test.txt')
data = []
with open(data_path, 'r', encoding='utf8') as fin:
pre_idx = -1
examples = []
for line in fin:
if not line:
continue
arr = line.rstrip('\n').split('\t')
if len(arr) != 4:
print('Data format error: %s' % '\t'.join(arr))
print(
'Data row should contains four parts: id\tlabel\tcaller\tconversation_content.'
)
continue
idx, label, caller, text = arr
if idx != pre_idx:
if idx != 0:
examples = _concat_dialogues(examples)
data.extend(examples)
examples = []
pre_idx = idx
examples.append((label, caller, text))
if examples:
examples = _concat_dialogues(examples)
data.extend(examples)
return data
def truncate_and_concat(pre_txt: List[str],
cur_txt: str,
suf_txt: List[str],
tokenizer,
max_seq_length,
max_len_of_cur_text):
"""concat data"""
cur_tokens = tokenizer.tokenize(cur_txt)
cur_tokens = cur_tokens[:min(max_len_of_cur_text, len(cur_tokens))]
pre_tokens = []
for text in pre_txt:
pre_tokens.extend(tokenizer.tokenize(text))
pre_tokens.append(INNER_SEP)
pre_tokens = pre_tokens[:-1]
suf_tokens = []
for text in suf_txt:
suf_tokens.extend(tokenizer.tokenize(text))
suf_tokens.append(INNER_SEP)
suf_tokens = suf_tokens[:-1]
if len(cur_tokens) + len(pre_tokens) + len(suf_tokens) > max_seq_length - 4:
left_num = max_seq_length - 4 - len(cur_tokens)
if len(pre_tokens) > len(suf_tokens):
suf_num = int(left_num / 2)
suf_tokens = suf_tokens[:suf_num]
pre_num = left_num - len(suf_tokens)
pre_tokens = pre_tokens[max(0, len(pre_tokens) - pre_num):]
else:
pre_num = int(left_num / 2)
pre_tokens = pre_tokens[max(0, len(pre_tokens) - pre_num):]
suf_num = left_num - len(pre_tokens)
suf_tokens = suf_tokens[:suf_num]
tokens, segment_ids = [], []
tokens.append("[CLS]")
for token in pre_tokens:
tokens.append(token)
tokens.append("[SEP]")
segment_ids.extend([0] * len(tokens))
for token in cur_tokens:
tokens.append(token)
tokens.append("[SEP]")
segment_ids.extend([1] * (len(cur_tokens) + 1))
if suf_tokens:
for token in suf_tokens:
tokens.append(token)
tokens.append("[SEP]")
segment_ids.extend([0] * (len(suf_tokens) + 1))
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
return input_ids, input_mask, segment_ids
class MRDA(Dataset):
"""
    The MRDA dataset is used in the Dialogue Act task.
The source dataset is MRDA(Meeting Recorder Dialogue Act). See detail at
https://www.aclweb.org/anthology/W04-2319.pdf
"""
MAX_LEN_OF_CUR_TEXT = 50
LABEL_MAP = get_label_map([str(i) for i in range(5)])
def __init__(self, data_dir, mode='train'):
super(MRDA, self).__init__()
self.data = read_da_data(data_dir, mode)
@classmethod
def get_label(cls, label):
return cls.LABEL_MAP[label]
@classmethod
def num_classes(cls):
return len(cls.LABEL_MAP)
@classmethod
def convert_example(cls, example, tokenizer, max_seq_length=512):
""" Convert a glue example into necessary features. """
label, pre_txt, cur_txt, suf_txt = example
label = np.array([cls.get_label(label)], dtype='int64')
input_ids, input_mask, segment_ids = truncate_and_concat(pre_txt, cur_txt, suf_txt, \
tokenizer, max_seq_length, cls.MAX_LEN_OF_CUR_TEXT)
return input_ids, input_mask, segment_ids, label
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class SwDA(Dataset):
"""
    The SwDA dataset is used in the Dialogue Act task.
The source dataset is SwDA(Switchboard Dialog Act). See detail at
http://compprag.christopherpotts.net/swda.html
"""
MAX_LEN_OF_CUR_TEXT = 50
LABEL_MAP = get_label_map([str(i) for i in range(42)])
def __init__(self, data_dir, mode='train'):
super(SwDA, self).__init__()
self.data = read_da_data(data_dir, mode)
@classmethod
def get_label(cls, label):
return cls.LABEL_MAP[label]
@classmethod
def num_classes(cls):
return len(cls.LABEL_MAP)
@classmethod
def convert_example(cls, example, tokenizer, max_seq_length=512):
""" Convert a glue example into necessary features. """
label, pre_txt, cur_txt, suf_txt = example
label = np.array([cls.get_label(label)], dtype='int64')
input_ids, input_mask, segment_ids = truncate_and_concat(pre_txt, cur_txt, suf_txt, \
tokenizer, max_seq_length, cls.MAX_LEN_OF_CUR_TEXT)
return input_ids, input_mask, segment_ids, label
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
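# A rough sketch of how one of these datasets could be fed into training (assumes a
# BERT-style `tokenizer` object and mindspore.dataset.GeneratorDataset; the column
# names below are illustrative):
#
#   raw = SwDA(data_dir='data/swda', mode='train')
#   def generator():
#       for example in raw:
#           yield SwDA.convert_example(example, tokenizer, max_seq_length=512)
#   import mindspore.dataset as ds
#   train_ds = ds.GeneratorDataset(generator,
#                                  column_names=['input_ids', 'input_mask',
#                                                'segment_ids', 'label'])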
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
ae9d6a61eca7fe11f99e20f1e31752dd023a83a1
|
1ec9f86c460a7ca5fadb2ccf9f6cdf9c2c4b3287
|
/backend/users/views.py
|
dc704a594394f7747d430090e38531dd1d68991a
|
[] |
no_license
|
sushant2308/Meet-the-doctor
|
0b53fa7f9200debc8392b79b92bf826e77d8da60
|
1ed16b30ea26434a1ccda298294f1c1550d0857d
|
refs/heads/master
| 2023-08-24T10:21:32.677065
| 2021-10-14T07:43:03
| 2021-10-14T07:43:03
| 384,092,646
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .serializers import UserSerializer,SigInSerializer
from .models import User
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK,
)
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
@api_view(['GET', ])
def speciality_doctors(request,slug):
doctors = User.objects.filter(is_doctor=True,speciality=slug)
serializer = UserSerializer(doctors,many=True)
return Response(serializer.data,status=HTTP_200_OK)
@api_view(["POST"])
def signin(request):
signin_serializer = SigInSerializer(data = request.data)
if not signin_serializer.is_valid():
return Response(signin_serializer.errors, status = HTTP_400_BAD_REQUEST)
user = authenticate(
request=request,
username = request.data['email'],
password = request.data['password']
)
if not user:
return Response({'detail': 'Invalid Credentials or activate account'}, status=HTTP_404_NOT_FOUND)
    # issue (or reuse) the DRF auth token and mark the user as logged in
user.status=1
user.save()
token, _ = Token.objects.get_or_create(user = user)
user_serialized = UserSerializer(user)
return Response({
'user': user_serialized.data,
'token': token.key
}, status=HTTP_200_OK)
@api_view(['GET', ])
def logout(request):
user=request.user
print(user.status)
user.status=0
user.save()
return Response({"message":"Successfully logged out"},status=HTTP_200_OK)
|
[
"raisushantkumar726@gmail.com"
] |
raisushantkumar726@gmail.com
|
4aa9aa10086ca521fc6643a0560e8adf06af8ee0
|
ceb282df59afb5714dda768c9ee26ae8c3cd14ef
|
/api/src/apps/pages/models.py
|
c612e3e6d43114951e4100adf6d14aa6688753ef
|
[] |
no_license
|
ukiyodigital/float
|
5aaee3080a7028008edee259e14ba5b5dfe323c8
|
1f3be29cba8273ab1b0e837de4eb53f2d49fc24c
|
refs/heads/develop
| 2023-03-14T03:16:02.859606
| 2022-03-21T15:34:03
| 2022-03-21T15:34:03
| 163,778,265
| 2
| 0
| null | 2023-02-28T06:20:45
| 2019-01-02T00:57:46
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
from django.db import models
from django.contrib.postgres.fields import JSONField
from django.core.serializers.json import DjangoJSONEncoder
from apps.sites.models import Site
from apps.column_headers.models import ColumnHeader
from apps.users.models import User
from apps.column_headers.utils import ColumnManager
class Page(models.Model):
# page_name
name = models.CharField(max_length=15, blank=False)
slug = models.SlugField(max_length=15)
# Foreign Keys
site = models.ForeignKey(Site, on_delete=models.PROTECT, related_name='pages')
users = models.ManyToManyField(User)
class Meta:
unique_together = ('slug', 'site',)
def update_columns(self, columns):
manager = ColumnManager(
model=PageColumnHeader,
column_fields=['name', 'slug', 'order', 'field', 'data'],
)
manager.save_columns(columns, self.id)
class PageColumnHeader(ColumnHeader):
page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='columns', null=True, blank=True)
data = JSONField(null=True, blank=True, encoder=DjangoJSONEncoder)
class Meta:
# columns cannot have the same parent
unique_together = (
('page', 'slug',),
('parent', 'slug',),
)
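# Usage sketch for Page.update_columns (the exact payload shape is defined by
# ColumnManager; a list of dicts keyed by the column_fields above is assumed):
#
#   page = Page.objects.create(name='Home', slug='home', site=site)
#   page.update_columns([
#       {'name': 'Title', 'slug': 'title', 'order': 1, 'field': 'text', 'data': None},
#   ])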
|
[
"kevin.a.cunanan@gmail.com"
] |
kevin.a.cunanan@gmail.com
|
d753d0c4da9bb638deab2a12cfdd73f9e4680cb5
|
bac7a7507933ac5bb38b41bbe2a587764da3cf94
|
/snappy_wrappers/wrappers/link_in_bam/wrapper.py
|
09790324734c2213f0b8a7b3f82af6b18a1c8997
|
[
"MIT"
] |
permissive
|
Pregelnuss/snappy-pipeline
|
923b0f36117a2f55ee52f9a8564ed3bb82a8be16
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
refs/heads/master
| 2023-06-19T07:24:04.736033
| 2021-05-27T07:24:05
| 2021-05-27T07:24:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
# -*- coding: utf-8 -*-
"""CUBI+Snakemake wrapper code for external: Snakemake wrapper.py
"""
from snakemake import shell
__author__ = "Oliver Stolpe <oliver.stolpe@bihealth.de>"
shell.executable("/bin/bash")
this_file = __file__
input = snakemake.params.args["input"]
if not input:
raise Exception("No bam found")
shell(
r"""
set -x
# Write out information about conda installation.
conda list >{snakemake.log.conda_list}
conda info >{snakemake.log.conda_info}
# Also pipe stderr to log file
if [[ -n "{snakemake.log.log}" ]]; then
if [[ "$(set +e; tty; set -e)" != "" ]]; then
rm -f "{snakemake.log.log}" && mkdir -p $(dirname {snakemake.log.log})
exec 2> >(tee -a "{snakemake.log.log}" >&2)
else
rm -f "{snakemake.log.log}" && mkdir -p $(dirname {snakemake.log.log})
echo "No tty, logging disabled" >"{snakemake.log.log}"
fi
fi
# Setup auto-cleaned TMPDIR
export TMPDIR=$(mktemp -d)
trap "rm -rf $TMPDIR" EXIT
mkdir -p $TMPDIR/tmp.d
# Link in bam files with the proper file name scheme
ln -sr {input} {snakemake.output.bam}
# Link in the existing BAM index if present, otherwise create one
if [[ -e {input}.bai ]]; then
ln -sr {input}.bai {snakemake.output.bam_bai}
else
samtools index {snakemake.output.bam}
fi
# Build MD5 files
pushd $(dirname {snakemake.output.bam})
md5sum $(basename {snakemake.output.bam}) > $(basename {snakemake.output.bam}).md5
md5sum $(basename {snakemake.output.bam_bai}) > $(basename {snakemake.output.bam_bai}).md5
popd
# QC Report ---------------------------------------------------------------------------------------
# gather statistics from BAM file
# TODO: use pipes for only reading once from disk?
samtools stats {snakemake.output.bam} > {snakemake.output.report_bamstats_txt}
samtools flagstat {snakemake.output.bam} > {snakemake.output.report_flagstats_txt}
samtools idxstats {snakemake.output.bam} > {snakemake.output.report_idxstats_txt}
# call plot-bamstats
mkdir $TMPDIR/bamstats.d
plot-bamstats \
-p $TMPDIR/bamstats.d/ \
{snakemake.output.report_bamstats_txt} \
|| true # ignore failure
# Convert HTML report into one file.
inline-html \
--in-file $TMPDIR/bamstats.d/index.html \
--out-file {snakemake.output.report_bamstats_html} \
|| touch {snakemake.output.report_bamstats_html}
# Build MD5 files for the reports
md5sum {snakemake.output.report_bamstats_html} > {snakemake.output.report_bamstats_html_md5}
md5sum {snakemake.output.report_bamstats_txt} > {snakemake.output.report_bamstats_txt_md5}
md5sum {snakemake.output.report_flagstats_txt} >{snakemake.output.report_flagstats_txt_md5}
md5sum {snakemake.output.report_idxstats_txt} > {snakemake.output.report_idxstats_txt_md5}
# Additional logging for transparency & reproducibility
# Logging: Save a copy this wrapper (with the pickle details in the header)
cp {this_file} $(dirname {snakemake.log.log})/wrapper.py
# Logging: Save a permanent copy of the environment file used
cp $(dirname {this_file})/environment.yaml $(dirname {snakemake.log.log})/environment_wrapper.yaml
"""
)
|
[
"manuel.holtgrewe@bihealth.de"
] |
manuel.holtgrewe@bihealth.de
|
fe617ba47c9efdffab6c275fdc564daa8bb65ee9
|
80301f1cffc5afce13256e2ecab6323c5df00194
|
/cn.3rd/py/A0024.py
|
35dc33ee31bc4810216c072c4f632d116a8f110f
|
[] |
no_license
|
ZhenjianYang/SoraVoiceScripts
|
c1ddf7c1bbcb933243754f9669bd6b75777c87b9
|
94a948090aba0f63b10b2c69dc845dc99c822fc4
|
refs/heads/master
| 2023-04-18T04:54:44.306652
| 2023-04-06T11:15:17
| 2023-04-06T11:15:17
| 103,167,541
| 43
| 11
| null | 2021-03-06T08:52:54
| 2017-09-11T17:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 27,855
|
py
|
from ED63RDScenarioHelper import *
def main():
SetCodePage("gbk")
    # Debug map
CreateScenaFile(
FileName = 'A0024 ._SN',
MapName = 'map1',
Location = 'T0030.x',
MapIndex = 1,
MapDefaultBGM = "ed60010",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'04580尤莉亚待机', # 9
'04581尤莉亚移动', # 10
'04582尤莉亚攻击', # 11
'04583尤莉亚被弹开', # 12
'04584尤莉亚倒下', # 13
'04585尤莉亚魔法咏唱', # 14
'04586尤莉亚魔法发动', # 15
'04570穆拉待机', # 16
'04571穆拉移动', # 17
'04572穆拉攻击', # 18
'04573穆拉被弹开', # 19
'04574穆拉倒下', # 20
'04575穆拉魔法咏唱', # 21
'04576穆拉魔法发动', # 22
'04590希德待机', # 23
'04591希德移动', # 24
'04592希德攻击', # 25
'04593希德被弹开', # 26
'04594希德倒下', # 27
'04595希德魔法咏唱', # 28
'04596希德魔法发动', # 29
'04120凯诺娜待机', # 30
'04121凯诺娜移动', # 31
'04122凯诺娜攻击', # 32
'04123凯诺娜被弹开', # 33
'04124凯诺娜倒下', # 34
'04125凯诺娜魔法咏唱', # 35
'04126凯诺娜魔法发动', # 36
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 0,
Unknown_0C = 4,
Unknown_0E = 5,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 315,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT27/CH04580 ._CH', # 00
'ED6_DT27/CH04581 ._CH', # 01
'ED6_DT27/CH04582 ._CH', # 02
'ED6_DT27/CH04583 ._CH', # 03
'ED6_DT27/CH04584 ._CH', # 04
'ED6_DT27/CH04585 ._CH', # 05
'ED6_DT27/CH04586 ._CH', # 06
'ED6_DT27/CH04583 ._CH', # 07
'ED6_DT27/CH04583 ._CH', # 08
'ED6_DT27/CH04583 ._CH', # 09
'ED6_DT27/CH04570 ._CH', # 0A
'ED6_DT27/CH04571 ._CH', # 0B
'ED6_DT27/CH04572 ._CH', # 0C
'ED6_DT27/CH04573 ._CH', # 0D
'ED6_DT27/CH04574 ._CH', # 0E
'ED6_DT27/CH04575 ._CH', # 0F
'ED6_DT27/CH04576 ._CH', # 10
'ED6_DT27/CH04573 ._CH', # 11
'ED6_DT27/CH04573 ._CH', # 12
'ED6_DT27/CH04573 ._CH', # 13
'ED6_DT27/CH04590 ._CH', # 14
'ED6_DT27/CH04591 ._CH', # 15
'ED6_DT27/CH04592 ._CH', # 16
'ED6_DT27/CH04593 ._CH', # 17
'ED6_DT27/CH04594 ._CH', # 18
'ED6_DT27/CH04595 ._CH', # 19
'ED6_DT27/CH04596 ._CH', # 1A
'ED6_DT27/CH04593 ._CH', # 1B
'ED6_DT27/CH04593 ._CH', # 1C
'ED6_DT27/CH04593 ._CH', # 1D
'ED6_DT27/CH04120 ._CH', # 1E
'ED6_DT27/CH04121 ._CH', # 1F
'ED6_DT27/CH04122 ._CH', # 20
'ED6_DT27/CH04123 ._CH', # 21
'ED6_DT27/CH04124 ._CH', # 22
'ED6_DT27/CH04125 ._CH', # 23
'ED6_DT27/CH04126 ._CH', # 24
'ED6_DT27/CH04123 ._CH', # 25
'ED6_DT27/CH04123 ._CH', # 26
'ED6_DT27/CH04123 ._CH', # 27
)
AddCharChipPat(
'ED6_DT27/CH04580P._CP', # 00
'ED6_DT27/CH04581P._CP', # 01
'ED6_DT27/CH04582P._CP', # 02
'ED6_DT27/CH04583P._CP', # 03
'ED6_DT27/CH04584P._CP', # 04
'ED6_DT27/CH04585P._CP', # 05
'ED6_DT27/CH04586P._CP', # 06
'ED6_DT27/CH04583P._CP', # 07
'ED6_DT27/CH04583P._CP', # 08
'ED6_DT27/CH04583P._CP', # 09
'ED6_DT27/CH04570P._CP', # 0A
'ED6_DT27/CH04571P._CP', # 0B
'ED6_DT27/CH04572P._CP', # 0C
'ED6_DT27/CH04573P._CP', # 0D
'ED6_DT27/CH04574P._CP', # 0E
'ED6_DT27/CH04575P._CP', # 0F
'ED6_DT27/CH04576P._CP', # 10
'ED6_DT27/CH04573P._CP', # 11
'ED6_DT27/CH04573P._CP', # 12
'ED6_DT27/CH04573P._CP', # 13
'ED6_DT27/CH04590P._CP', # 14
'ED6_DT27/CH04591P._CP', # 15
'ED6_DT27/CH04592P._CP', # 16
'ED6_DT27/CH04593P._CP', # 17
'ED6_DT27/CH04594P._CP', # 18
'ED6_DT27/CH04595P._CP', # 19
'ED6_DT27/CH04596P._CP', # 1A
'ED6_DT27/CH04593P._CP', # 1B
'ED6_DT27/CH04593P._CP', # 1C
'ED6_DT27/CH04593P._CP', # 1D
'ED6_DT27/CH04120P._CP', # 1E
'ED6_DT27/CH04121P._CP', # 1F
'ED6_DT27/CH04122P._CP', # 20
'ED6_DT27/CH04123P._CP', # 21
'ED6_DT27/CH04124P._CP', # 22
'ED6_DT27/CH04125P._CP', # 23
'ED6_DT27/CH04126P._CP', # 24
'ED6_DT27/CH04123P._CP', # 25
'ED6_DT27/CH04123P._CP', # 26
'ED6_DT27/CH04123P._CP', # 27
)
DeclNpc(
X = 4000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 4000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 4000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 4000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 4000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 4000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 6,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 4000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 7,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 10,
ChipIndex = 0xA,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 11,
ChipIndex = 0xB,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 12,
ChipIndex = 0xC,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 13,
ChipIndex = 0xD,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 14,
ChipIndex = 0xE,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 15,
ChipIndex = 0xF,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 6,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 16,
ChipIndex = 0x10,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 8,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 20,
ChipIndex = 0x14,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 21,
ChipIndex = 0x15,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 22,
ChipIndex = 0x16,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 23,
ChipIndex = 0x17,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 24,
ChipIndex = 0x18,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 25,
ChipIndex = 0x19,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 6,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 26,
ChipIndex = 0x1A,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 9,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 30,
ChipIndex = 0x1E,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 31,
ChipIndex = 0x1F,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 32,
ChipIndex = 0x20,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 33,
ChipIndex = 0x21,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 34,
ChipIndex = 0x22,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 35,
ChipIndex = 0x23,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 6,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 36,
ChipIndex = 0x24,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 11,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
ScpFunction(
"Function_0_56A", # 00, 0
"Function_1_56B", # 01, 1
"Function_2_56C", # 02, 2
"Function_3_582", # 03, 3
"Function_4_598", # 04, 4
"Function_5_5B3", # 05, 5
"Function_6_5CE", # 06, 6
"Function_7_61B", # 07, 7
"Function_8_6D7", # 08, 8
"Function_9_793", # 09, 9
"Function_10_84F", # 0A, 10
"Function_11_865", # 0B, 11
"Function_12_921", # 0C, 12
)
def Function_0_56A(): pass
label("Function_0_56A")
Return()
# Function_0_56A end
def Function_1_56B(): pass
label("Function_1_56B")
Return()
# Function_1_56B end
def Function_2_56C(): pass
label("Function_2_56C")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_581")
OP_99(0xFE, 0x0, 0x7, 0x640)
Jump("Function_2_56C")
label("loc_581")
Return()
# Function_2_56C end
def Function_3_582(): pass
label("Function_3_582")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_597")
OP_99(0xFE, 0x0, 0x7, 0x7D0)
Jump("Function_3_582")
label("loc_597")
Return()
# Function_3_582 end
def Function_4_598(): pass
label("Function_4_598")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_5B2")
OP_99(0xFE, 0x0, 0x0, 0x5DC)
Sleep(500)
Jump("Function_4_598")
label("loc_5B2")
Return()
# Function_4_598 end
def Function_5_5B3(): pass
label("Function_5_5B3")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_5CD")
OP_99(0xFE, 0x0, 0x3, 0x3E8)
Sleep(500)
Jump("Function_5_5B3")
label("loc_5CD")
Return()
# Function_5_5B3 end
def Function_6_5CE(): pass
label("Function_6_5CE")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_61A")
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
Jump("Function_6_5CE")
label("loc_61A")
Return()
# Function_6_5CE end
def Function_7_61B(): pass
label("Function_7_61B")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_6D6")
SetChrChipByIndex(0xFE, 5)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 6)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_7_61B")
label("loc_6D6")
Return()
# Function_7_61B end
def Function_8_6D7(): pass
label("Function_8_6D7")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_792")
SetChrChipByIndex(0xFE, 15)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 16)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_8_6D7")
label("loc_792")
Return()
# Function_8_6D7 end
def Function_9_793(): pass
label("Function_9_793")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_84E")
SetChrChipByIndex(0xFE, 25)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 26)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_9_793")
label("loc_84E")
Return()
# Function_9_793 end
def Function_10_84F(): pass
label("Function_10_84F")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_864")
OP_99(0xFE, 0x0, 0x7, 0x640)
Jump("Function_10_84F")
label("loc_864")
Return()
# Function_10_84F end
def Function_11_865(): pass
label("Function_11_865")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_920")
SetChrChipByIndex(0xFE, 35)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 36)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_11_865")
label("loc_920")
Return()
# Function_11_865 end
def Function_12_921(): pass
label("Function_12_921")
TalkBegin(0xFE)
ChrTalk( #0
0xFE,
"你好。\x02",
)
Jump("loc_93A")
label("loc_93A")
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_12_921 end
SaveToFile()
Try(main)
|
[
"zhenjian.c.yang@gmail.com"
] |
zhenjian.c.yang@gmail.com
|
be6a016ce6c16fe2faa6e74c48ad6571cc088641
|
b33ddc7b89d05e19fdeb69593872fd174fab9f4f
|
/URI-py/2875.py
|
49dc31d7091f31bea192a97075a7c40e9e9f21a3
|
[] |
no_license
|
ThiagoCComelli/URI-Online-Judge
|
8b8d609d880342b39ba0d396c0610ecb7e01a5af
|
5348f736b2d683f4b857232c22cccb7c1d8b8d65
|
refs/heads/master
| 2020-07-23T15:14:05.353948
| 2020-03-10T19:42:12
| 2020-03-10T19:42:12
| 207,606,956
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
# -*- coding: utf-8 -*-
while True:
try:
n,m = map(int, input().split())
lista = []
lista1= []
for i in range(n):
lista.append(input().split())
while True:
for i in range(n):
for j in range(m):
a =a
except EOFError:
break
|
[
"thiago.comelli@outlook.com"
] |
thiago.comelli@outlook.com
|
6d6cd4acc897db1f094012fabc3bba85a8afe094
|
5a212d29890119f91d61b0d6c8f701277f25b875
|
/piixxie/errors.py
|
166fa7e9bc0ec01fb61375341af416b64945410d
|
[] |
no_license
|
Hooksie/piixxie
|
c922f78971b9cdea31979a6134180b6bea86704c
|
d1f126de0a3e63fc01548c23789f510c89a0f756
|
refs/heads/master
| 2021-01-20T17:58:17.477498
| 2016-06-24T05:04:18
| 2016-06-24T05:04:18
| 61,847,793
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
class PiixxieError(Exception):
"""
Generic error base class for anything Piixxie related.
"""
pass
class VerificationError(PiixxieError):
"""
Generic error raised when input image does not meet our requirements
for processing.
"""
pass
class DimensionError(VerificationError):
"""
Error for when input image does not have dimensions which are a multiple
of the pixel size.
"""
pass
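# Usage sketch (check_dimensions is a hypothetical caller, not part of this module):
#
#   def check_dimensions(width, height, pixel_size):
#       if width % pixel_size or height % pixel_size:
#           raise DimensionError("image dimensions must be a multiple of the pixel size")
#
#   try:
#       check_dimensions(101, 100, 8)
#   except VerificationError as err:   # also catches DimensionError
#       print(err)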
|
[
"me@matthooks.com"
] |
me@matthooks.com
|
583338bd6695ced08d352a82aa0bb9b38a8f8527
|
2ef7e7785b1f4bba60bf384cc878ed6948eb7fbe
|
/4 Kyu/stripComments.py
|
78da9397af08d94eb0602c6e9304375dc13b2ec6
|
[] |
no_license
|
Muneer320/CodeWars-Solved-Katas
|
16efcc9eca9ab635fdcb9c17ac9c177cc49a3ae9
|
4162ae7f9b48bbc08e1fa2743ee11b0fc4fd2318
|
refs/heads/main
| 2023-04-05T16:34:02.606131
| 2021-04-22T14:50:07
| 2021-04-22T14:50:07
| 360,554,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
def solution(string,markers):
parts = string.split('\n')
for s in markers:
parts = [v.split(s)[0].rstrip() for v in parts]
return '\n'.join(parts)
print(solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]))
|
[
"noreply@github.com"
] |
noreply@github.com
|
274f35141adb643bb2d94588530768a198f2b6b9
|
657aa6770a486ed812af26c6ec824a5e8bac1eab
|
/venv/Scripts/pip3.8-script.py
|
15e1a9ac428e1c0fb220fb3ed62f5b3bacc114b5
|
[] |
no_license
|
hemangibavasiya/ImageToArray
|
35f93a194de552832584af3d4d468ee2b2826425
|
3b61d575ec8c5fe652c3e16aeff5c263c1cd2e32
|
refs/heads/master
| 2022-12-17T06:37:54.800910
| 2020-09-21T05:53:03
| 2020-09-21T05:53:03
| 297,242,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
#!C:\Users\Hemangi.Bavasiya\PycharmProjects\ImageToArray\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.8'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.8')()
)
|
[
"hemangibavasiya08@gmail.com"
] |
hemangibavasiya08@gmail.com
|
0a1abc1df723114b5f626549217071f99ce3f6d6
|
1dce03e6f3f5b23d1e5c599678624638943b9422
|
/docker/create_docker_images2.py
|
c963255960a9c9025948e08941e44f9ffe9c6e2f
|
[] |
no_license
|
volat1977/byte_of_python
|
76ec958bdc51c7538bb24e5d152b456feab603ca
|
60b58ca3927ef5e2801c93dd676d5f8b4c03d9fc
|
refs/heads/master
| 2020-12-26T07:23:10.562537
| 2020-03-24T05:31:03
| 2020-03-24T05:31:03
| 237,431,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
from io import BytesIO
import docker
dockerfile = '''
# Shared Volume
FROM busybox:buildroot-2014.02
VOLUME /data
CMD ["/bin/sh"]
'''
f = BytesIO(dockerfile.encode('utf-8'))
cli = docker.from_env()
response = cli.api.build(fileobj=f, rm=True, tag='test3', decode=True)
#for line in response:
# if line.keys()[0] in ('stream', 'error'):
# value = line.values()[0].strip()
# if value:
# print(value)
# for line in response:
# if line.keys in ('stream', 'error'):
# value = line.values()[0].strip()
# if value:
# print(value)
|
[
"alex@pop-os.localdomain"
] |
alex@pop-os.localdomain
|
19d98f14f17b5614f275bb4b833370621df30e75
|
863e3aaca85d79dd9891cc1dc42dcb6541e253c4
|
/src/shortener/migrations/0001_initial.py
|
33d90b3acf25978d34d5ef51632f90056a9c9d7e
|
[] |
no_license
|
Swain0114/trydjango_100
|
47cf65feb44bf93de680bfbcf33e16ea85294ac6
|
5fbe60a5034bfcb0caa62f3f8529e7495cbfc8e6
|
refs/heads/master
| 2021-01-12T09:21:57.298717
| 2016-12-24T02:03:53
| 2016-12-24T02:03:53
| 76,149,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-12 23:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='shortener',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.CharField(max_length=220)),
('shortcode', models.CharField(max_length=15, unique=True)),
('update', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"tony820114@gmial.com"
] |
tony820114@gmial.com
|
744364012adc66c65453484e42e764b92591af0a
|
2fdcbdc3a179a861cf0b59ccbafa6f8153e53566
|
/artifacts/admin.py
|
79873b45d4fea619373aff83133d6f33b7063d85
|
[] |
no_license
|
Rasquin/auction
|
c6342ed4737d024c81667f03550d8dc093bb0458
|
f2fc9dc72ab7a34172329045d4e948780dc2c4e2
|
refs/heads/master
| 2022-07-12T07:04:53.202963
| 2020-02-04T15:52:20
| 2020-02-04T15:52:20
| 211,497,820
| 1
| 1
| null | 2022-06-21T23:50:09
| 2019-09-28T12:34:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 121
|
py
|
from django.contrib import admin
from .models import Artifact
# Register your models here.
admin.site.register(Artifact)
|
[
"ubuntu@ip-172-31-42-208.ec2.internal"
] |
ubuntu@ip-172-31-42-208.ec2.internal
|
974761893925c0cb51e9a1d433306bab6ff66024
|
c083f88701e27bbbda10b8b5e90763ad20297b42
|
/dch_002/settings.py
|
02d9223eb0f82dc23588839fbd3b9aacb51e6a4f
|
[] |
no_license
|
Shakeel-Nawaz/dch_002
|
70e9e713f6b7b23b30c180c2509a8484e1b682b5
|
24eda80b9a66f255fd3b79569caf2d20181e6ecd
|
refs/heads/main
| 2023-08-30T05:11:06.316241
| 2021-10-14T08:02:23
| 2021-10-14T08:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,497
|
py
|
"""
Django settings for dch_002 project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-8!+(_8^io@ue!diyhu+sw=%=sio7xoix#k)ksly03il#0#k5y('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app1'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dch_002.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# WSGI_APPLICATION = 'dch_002.wsgi.application'
ASGI_APPLICATION = 'dch_002.asgi.application'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("127.0.0.1", 6379)],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"Shakeelnawaz1@gmail.com"
] |
Shakeelnawaz1@gmail.com
|
cfa47b057d7d920775909b59ce508a0a03f128f1
|
385a8d743feb238fb0d939c58b564232aa5f5291
|
/tekton-master/backend/appengine/routes/relatorios/rest.py
|
6b7c0b8c7d68982f9ea5b9c07bc20b1b75b9e237
|
[
"MIT"
] |
permissive
|
lucasgcampos/app-engine-learning
|
7189439e9e431f738f05e0463b6dce8bf6601d8f
|
0c582d6150be152e55464b6bdfb5c6ab1d5c26fb
|
refs/heads/master
| 2016-08-02T22:02:31.816654
| 2014-11-14T03:36:01
| 2014-11-14T03:36:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from relatorio_app import facade
def index():
cmd = facade.list_relatorios_cmd()
relatorio_list = cmd()
short_form=facade.relatorio_short_form()
relatorio_short = [short_form.fill_with_model(m) for m in relatorio_list]
return JsonResponse(relatorio_short)
def save(**relatorio_properties):
cmd = facade.save_relatorio_cmd(**relatorio_properties)
return _save_or_update_json_response(cmd)
def update(relatorio_id, **relatorio_properties):
cmd = facade.update_relatorio_cmd(relatorio_id, **relatorio_properties)
return _save_or_update_json_response(cmd)
def delete(relatorio_id):
facade.delete_relatorio_cmd(relatorio_id)()
def _save_or_update_json_response(cmd):
try:
relatorio = cmd()
except CommandExecutionException:
return JsonResponse({'errors': cmd.errors})
short_form=facade.relatorio_short_form()
return JsonResponse(short_form.fill_with_model(relatorio))
|
[
"lucasgcampos.contato@gmail.com"
] |
lucasgcampos.contato@gmail.com
|
5c3a52dd83cd5f04121594050743968d48bc5958
|
7d21205946b306ca29aace9b4a798b8d9fa5bad2
|
/bot.py
|
5c3636b09b508ad15c20cfeff277833272b75e45
|
[] |
no_license
|
simorautiainen/aimboosterbot
|
9e66108da2780df2a0d0e0428ab1e55e8d2f5533
|
f2512289255126fdfb8d39f90b01c6cf043fa82c
|
refs/heads/master
| 2020-04-14T05:50:09.237499
| 2018-12-31T13:39:34
| 2018-12-31T13:39:34
| 163,670,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
import cv2
import numpy as np
import pyautogui
image = "dot7.png"
img = cv2.imread(image)
height, width, channels = img.shape
def imagesearch(image):
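    # Template-match `image` against a full-screen screenshot; return the top-left
    # corner of the best match, or [-1, -1] when the best score is below the 0.8 threshold.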
im = pyautogui.screenshot()
img_rgb = np.array(im)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(image, 0)
template.shape[::-1]
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if max_val < 0.8:
return [-1,-1]
return max_loc
while True:
pos = imagesearch(image)
while pos[0] == -1:
pos = imagesearch(image)
pyautogui.moveTo(pos[0] + (width / 2), pos[1] + (height / 2))
pyautogui.click()
|
[
"noreply@github.com"
] |
noreply@github.com
|
dafe3dc13683000b2708c82793a3b1ef4ea3dff2
|
ae1e3dc35b67479ee2d15475c29ccf849c9b02a7
|
/ext.py
|
497548aef6f152e29cde53bc33fd30414c5c0986
|
[] |
no_license
|
matrixback/network_printer
|
22d862d7741231b19f352c8369a3cdaad670691d
|
bd0c0cb653033d6b9d42aca0231e5f6af8e728d9
|
refs/heads/master
| 2021-01-24T06:49:06.725377
| 2017-06-06T15:48:07
| 2017-06-06T15:48:07
| 93,324,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
# coding: utf-8
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
|
[
"18302835641@163.com"
] |
18302835641@163.com
|
1ab744d642d54a043628e569b66babf9d7646fbc
|
1228750f9b95c5c2fb9a1d5cb339275db979356b
|
/anytime_models/examples/resnet-ann.py
|
64befd9e662b1cc774411440061df920bb1e721c
|
[
"MIT"
] |
permissive
|
microsoft/petridishnn
|
be0236b9385c7523ca71cfd171f95beaca5d851a
|
0e0431a56db893ef8ee14501f12bf7046d4d6024
|
refs/heads/master
| 2023-06-29T20:58:01.980267
| 2023-06-12T18:22:32
| 2023-06-12T18:22:32
| 180,651,701
| 123
| 24
|
MIT
| 2023-06-12T18:22:33
| 2019-04-10T19:39:36
|
Python
|
UTF-8
|
Python
| false
| false
| 657
|
py
|
import argparse
import anytime_models.models.anytime_network as anytime_network
from anytime_models.models.anytime_network import AnytimeResNet, AnytimeResNeXt
import ann_app_utils
"""
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser = ann_app_utils.parser_add_app_arguments(parser)
anytime_network.parser_add_resnet_arguments(parser)
args = parser.parse_args()
if args.resnet_version == 'resnet':
model_cls = AnytimeResNet
elif args.resnet_version == 'resnext':
model_cls = AnytimeResNeXt
args.b_type = 'bottleneck'
ann_app_utils.cifar_svhn_train_or_test(args, model_cls)
|
[
"hanzhang@cs.cmu.edu"
] |
hanzhang@cs.cmu.edu
|
93b36a9baec19346f743510cee81a567f11fbd3a
|
d5adda4f7abb3c066b7c3c24e0871cfba0e6ca2d
|
/IPnetwork/get_udp.py
|
1fb90139ca4cda6b073ed7afa071ffddeaf210d4
|
[] |
no_license
|
qwertpas/practice
|
8bb21caa629956787890d631c3026473742ac401
|
29b75ab01a2ce06b8b347aa5ded06451c598d78e
|
refs/heads/master
| 2020-04-09T06:06:45.659528
| 2018-12-02T21:33:20
| 2018-12-02T21:33:20
| 160,098,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
import socket
import sys
port = 8081
# Create a UDP socket (the script reads datagrams with recvfrom)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", port))
print("socket: ", sock)
running = True
while running:
the_data, the_addr = sock.recvfrom(1024)
print("R: ", the_data, '\t\t A: ', the_addr)
|
[
"christopher.y.x@gmail.com"
] |
christopher.y.x@gmail.com
|
1c18ab29aa811efe6d238546d9645e0ba2238440
|
118b53acb66b52e1a2c87129c680074a4b3a24a1
|
/utils/gen_config.py
|
137e297393c3aeba869cc170266e736288626e87
|
[] |
no_license
|
LomiJA/TTS-Eval
|
7f1be8ed27f1feb0fe656b14107f53963ce566b8
|
07c6e20499162b74a9190771f401aa4c528b56a5
|
refs/heads/master
| 2020-12-31T00:39:57.240621
| 2017-03-27T15:05:07
| 2017-03-27T15:05:07
| 86,559,081
| 1
| 0
| null | 2017-03-29T08:47:45
| 2017-03-29T08:47:45
| null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
import os
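# Walks ./data, builds a config dict for each experiment folder (ABX/MOS prefixes list one
# style's files, CM lists "style/file" pairs) and writes it to ./scripts/config.js
# as "var config = ...;".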
if __name__ == "__main__":
json_str = "var config = "
json_data = {"baseurl":"data", "exps":[]}
for exp in os.listdir("./data"):
exp_dic = {"path":exp}
exp_dic["styles"] = []
exp_dic["info"] = ""
exp_path = os.path.join("./data", exp)
for stl in os.listdir(exp_path):
exp_dic["styles"].append(stl)
if(exp[:3] == "ABX" or exp[:3] == "MOS"):
exp_dic["type"] = exp[:3]
exp_dic["files"] = []
style = exp_dic["styles"][0]
file_path = os.path.join(exp_path,style)
for fnm in os.listdir(file_path):
exp_dic["files"].append(fnm)
elif(exp[:2] == "CM"):
exp_dic["type"] = "CM"
exp_dic["files"] = []
for stl in exp_dic["styles"]:
file_path = os.path.join(exp_path,stl)
for fnm in os.listdir(file_path):
exp_dic["files"].append(stl + "/" + fnm)
else:
pass
json_data["exps"].append(exp_dic)
json_str += str(json_data) + ";"
handle = open("./scripts/config.js","w")
handle.write(json_str)
handle.close()
|
[
"nanqiao15@126.com"
] |
nanqiao15@126.com
|
d757c2b9d5123a880f8485775e37908e83cfa73b
|
81e5105ba9519dfaae3985e99f36d62ff3283276
|
/rgw/v2/tests/s3_swift/user_op_using_rest.py
|
99e37aa21dfc3b2e691d9b42c6b42da522bb5d96
|
[] |
no_license
|
sunilangadi2/ceph-qe-scripts
|
7fea0786a1a006d9877200cb308d65c21b34937d
|
1edad8710e283f464d42aeee4099b2128e178a95
|
refs/heads/master
| 2022-11-22T16:38:25.911294
| 2020-07-22T07:14:57
| 2020-07-22T07:14:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,173
|
py
|
"""
user_op_using_rest - Test user operation using REST API
Usage: user_op_using_rest.py -c <input_yaml>
<input_yaml>
test_user_with_REST.yaml
Operation:
Create Admin user
Using admin user, create new user using REST request
Using admin user, Modify existing user using REST request
Using admin user, Delete user using REST request
"""
# test REST api operation
import os, sys
import random
import string
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
from v2.lib.resource_op import Config
import v2.utils.log as log
import v2.utils.utils as utils
import traceback
import argparse
import yaml
import json
#import v2.lib.resource_op as swiftlib
from v2.lib.exceptions import TestExecError, RGWBaseException
from v2.utils.test_desc import AddTestInfo
from v2.lib.s3.write_io_info import IOInfoInitialize, BasicIOInfoStructure
from v2.lib.swift.auth import Auth
#import v2.lib.manage_data as manage_data
from v2.lib.admin import UserMgmt
from rgwadmin import RGWAdmin
#from v2.lib.frontend_configure import Frontend
TEST_DATA_PATH = None
def randomString(stringLength=3):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def s3_list(l):
a = []
a.append(l['user_id'])
a.append(l['display_name'])
a.append(l['email'])
a.append(l['max_buckets'])
a.append(l['keys'][0]['access_key'])
a.append(l['keys'][0]['secret_key'])
return a
def verify_user(api_user,regular_user):
x = s3_list(api_user)
y = s3_list(regular_user)
if x == y:
return True
else:
return False
def test_exec(config):
io_info_initialize = IOInfoInitialize()
basic_io_structure = BasicIOInfoStructure()
io_info_initialize.initialize(basic_io_structure.initial())
umgmt = UserMgmt()
host, ip = utils.get_hostname_ip()
port = utils.get_radosgw_port_no()
hostname=str(ip)+":"+str(port)
log.info(hostname)
# preparing data
admin_api_user = "admin_user_"+randomString()
log.info(admin_api_user)
user_info = umgmt.create_rest_admin_user(user_id=admin_api_user,
displayname=admin_api_user)
rgw = RGWAdmin(
access_key=user_info['access_key'],
secret_key=user_info['secret_key'],
server=hostname, secure=False, verify=False)
api_user = "api_user_"+randomString()
log.info(api_user)
for uc in range(config.user_count):
#Create User
data=rgw.create_user(
uid=api_user,
display_name=api_user,
email=api_user+'@abc.xyz')
log.info("User created successfully")
log.info(data)
log.info('verification starts')
op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" % api_user)
json_doc = json.loads(op)
log.info(json_doc)
v=verify_user(data, json_doc)
if v is False:
test_info.failed_status('test failed')
sys.exit(1)
log.info("Verification for create operation completed")
#Update User
data = rgw.modify_user(
uid=api_user,
display_name=api_user+"_11",
email=api_user+'_11@umd.edu')
log.info("User Updated successfully")
log.info(data)
log.info('verification starts')
op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" % api_user)
json_doc = json.loads(op)
log.info(json_doc)
v = verify_user(data, json_doc)
if v is False:
test_info.failed_status('test failed')
sys.exit(1)
log.info("Verification for Update operation completed")
#delete User
data = rgw.remove_user(uid=api_user, purge_data=False)
log.info(data)
log.info("User removed")
op = utils.exec_shell_cmd("radosgw-admin user list")
json_doc = json.loads(op)
if api_user in json_doc:
test_info.failed_status('test failed')
sys.exit(1)
log.info("Verification for Delete operation completed")
if __name__ == '__main__':
test_info = AddTestInfo('test REST api operation')
try:
project_dir = os.path.abspath(os.path.join(__file__, "../../.."))
test_data_dir = 'test_data'
TEST_DATA_PATH = (os.path.join(project_dir, test_data_dir))
log.info('TEST_DATA_PATH: %s' % TEST_DATA_PATH)
if not os.path.exists(TEST_DATA_PATH):
log.info('test data dir not exists, creating.. ')
os.makedirs(TEST_DATA_PATH)
parser = argparse.ArgumentParser(description='RGW S3 Automation')
parser.add_argument('-c', dest="config",
help='RGW Test yaml configuration')
args = parser.parse_args()
yaml_file = args.config
config = Config(yaml_file)
config.read()
test_exec(config)
test_info.success_status('test passed')
sys.exit(0)
except (RGWBaseException, Exception) as e:
log.info(e)
log.info(traceback.format_exc())
test_info.failed_status('test failed')
sys.exit(1)
|
[
"ukurundw@redhat.com"
] |
ukurundw@redhat.com
|
e61d9c8b65dd2e6ddb62065629685896f512ffb7
|
0fe37e11df976c55fe5bbe492879b7cd8a95b7c5
|
/1_2_python变量_输出和输入_数字_字符串/04_str_test.py
|
3444adc19895857e5d4fee8cb2347e41708b2bfb
|
[] |
no_license
|
1286211699/mmc_code
|
9bb7761107604b445dea4fe5acf9d503fbc28dfa
|
ee97879632dfd7d24c604f7db52c82fa29109daa
|
refs/heads/master
| 2022-12-08T23:19:06.382825
| 2020-05-08T13:59:46
| 2020-05-08T13:59:46
| 177,100,815
| 2
| 0
| null | 2022-12-08T01:42:47
| 2019-03-22T08:25:37
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
# name = 'for'
#
# name = "for's name is for"
# print(name)
# print('abcd\tefg')
# print('My name is %s'%('for'))
# print('I am %d years old'%(18))
# print('his height is %f m'%(1.78))
# print('his height is %.2f m'%(1.78))
# name = 'while'
#
# print(name[1:3])
# str_test = 'hello world world'
#
# print(str_test.partition('o'))
# print(str_test.rpartition('o'))
# my_str = 'hello:world:python '
# print(my_str)
# print(my_str.replace('l','w'))
# # print(my_str.splitlines())
# # print(my_str.split(':'))
# print(str_test.count('l'))
#
# print(str_test.find('w'))
#
# print(str_test.rfind('w'))
#
# print(str_test.index('o'))
# print(str_test.rindex('o'))
# print(str_test[::-1])
# print(str_test[::-2])
#
# print(str_test[1:9:-1])
# print(str_test[9:1:-1])
# print(str_test[0:7])
#
# print(str_test[:7])
#
# print(str_test[2:])
#
# print(str_test[:])
# print(str_test[::2])
# print(str_test[0:7:2])
# str_test = ' for '
# print(str_test.strip())  # strip() accounts for a large share of later data-cleaning work
# print(str_test.rstrip())
# print(str_test.lstrip())
# print(str_test.center(10,'x'))
# print(str_test.ljust(10,'x'))
# print(str_test.rjust(10,'x'))
# print(str_test.zfill(10))
#
# python = '{} is {}'
#
# print(python.format('for','cool'))
#
# print('hello'.upper())
# print('HELLO'.lower())
#
# print('12345a'.isalnum())
# print('abcdef'.isalpha())
# print('12345'.isdigit())
# print('HELLO'.isupper())
# print('hello'.islower())
# print(' '.isspace())
#
# print('for is cool'[3:].startswith(' '))
# print('for is cool'[3:].endswith('cool'))
# print(ord('a'))
# print(chr(97))
u = '学神'
str1 = u.encode('gbk')
print(str1)
str2 = u.encode()
print(str2)
u1 = str1.decode('gbk')
print(u1)
u2 = str2.decode('utf-8')
print(u2)
|
[
"1286211699@qq.com"
] |
1286211699@qq.com
|
c276920814b35fe507549c51ba57f9cb4f8203e7
|
068c4665dc7b803df0fc02524cfdb01fff1674da
|
/Fraction.py
|
0be0f332c82ec1f3e67dc3b912217171f70c3dc5
|
[] |
no_license
|
TingYang227/Python
|
86512e01adf676cee943fa9ab78ce018f19dcc91
|
6bc48c6f688f9a3088e34a2117c861b97ddcdc75
|
refs/heads/master
| 2020-04-20T20:26:53.693987
| 2019-11-07T17:50:53
| 2019-11-07T17:50:53
| 169,076,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
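# Greatest common divisor via Euclid's algorithm: repeatedly replace (m, n)
# with (n, m mod n) until the remainder is zero, then return n.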
def gcd(m, n):
while m % n != 0:
oldm = m
oldn = n
m = oldn
n = oldm % oldn
return n
# print(gcd(20, 10))
class Fraction:
def __init__(self, top, bottom):
self.num = top
self.den = bottom
def __str__(self):
return str(self.num) + "/" + str(self.den)
def show(self):
print(self.num, "/", self.den)
def __add__(self, otherfraction):
newnum = self.num*otherfraction.den + self.den*otherfraction.num
newden = self.den * otherfraction.den
common = gcd(newnum, newden)
return Fraction(newnum//common, newden//common)
def __mul__(self, other):
newnum = self.num * other.num
newden = self.den * other.den
common = gcd(newnum, newden)
return Fraction(newnum//common, newden//common)
def __sub__(self, other):
newnum = self.num * other.den - other.num * self.den
        newden = self.den * other.den
common = gcd(newnum, newden)
return Fraction(newnum//common, newden//common)
def __truediv__(self, other):
newnum = self.num * other.den
newden = self.den * other.num
common = gcd(newnum, newden)
return Fraction(newnum//common, newden//common)
def __eq__(self, other):
firstnum = self.num * other.den
secondnum = other.num * self.den
return firstnum == secondnum
def __lt__(self, other):
firstnum = self.num * other.den
secondnum = other.num * self.den
return firstnum < secondnum
def __gt__(self, other):
firstnum = self.num * other.den
secondnum = other.num * self.den
return firstnum > secondnum
def getNum(self):
return self.num
def getDen(self):
return self.den
x = Fraction(1, 2)
y = Fraction(2, 3)
print(x + y)
print(x == y)
print(x * y)
print(y - x)
print(x - y)
print(x / y)
print(x > y)
print(x < y)
|
[
"39301486+TingYang227@users.noreply.github.com"
] |
39301486+TingYang227@users.noreply.github.com
|
f49836386eb4a843e803fa9e83c186e024a5b259
|
6ef0bbc5be7ba14286725cd37b01522bda1bd405
|
/judgements/indicator.py
|
0be983132959fc9802d2e58873722dc9a75e1fcb
|
[] |
no_license
|
Shanney/StockCenter
|
b27646ed91899221e37af6685d533a46d1bb10a9
|
e757430d733405b2219fae15951c9c460783171b
|
refs/heads/master
| 2021-06-29T23:59:40.503090
| 2021-01-03T12:59:06
| 2021-01-03T12:59:06
| 203,547,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
import numpy as np
def consecutive_five_year_roe(indicators):
    # Return ROE for five consecutive years; only companies with ROE above 15% are worth attention
result = {}
roe_positive_flag = True
consecutive_detail = ''
for indicator in indicators:
# print(indicator.loc[0].statDate + ' ' + str(indicator.loc[0].roe))
if indicator.loc[0].roe < 15:
roe_positive_flag = False
consecutive_detail += str(indicator.loc[0].roe) + ' '
result['roe_positive_flag'] = roe_positive_flag
result['consecutive_detail'] = consecutive_detail
return result
def ent_mode(income, cash_flow, balance_two, indicator):
"""
    ROE can be viewed as the product of three factors:
    1. Net profit margin (net profit / sales revenue)
    2. Total asset turnover (sales revenue / average total assets)
    3. Leverage ratio (average total assets / net assets)
    This reveals the business model: Moutai-style, Walmart-style, or bank-style.
    Net assets cannot be read directly, so net profit / ROE is used as the average net assets instead.
    :param indicator: financial indicator table
    :param balance_two: balance sheets for two consecutive years, to use opening and closing figures
    :param cash_flow: cash flow statement
    :param income: income statement
:return:
"""
ind_one = np.nan_to_num(income.net_profit) / np.nan_to_num(cash_flow.goods_sale_and_service_render_cash)
    # average total assets = (opening + closing) / 2
ave_asset = (np.nan_to_num(balance_two[0].loc[0].total_sheet_owner_equities) + np.nan_to_num(
balance_two[1].loc[0].total_sheet_owner_equities)) / 2
ind_two = np.nan_to_num(cash_flow.goods_sale_and_service_render_cash) / np.nan_to_num(ave_asset)
ave_net_asset = np.nan_to_num(income.net_profit) / np.nan_to_num(indicator.roe)
ind_three = np.nan_to_num(ave_asset) / np.nan_to_num(ave_net_asset)
return {'ind_one': str(ind_one), 'ind_two': str(ind_two), 'ind_three': str(ind_three)}
    # print('net profit margin: ' + str(ind_one))
    # print('total asset turnover: ' + str(ind_two))
    # print('leverage ratio: ' + str(ind_three))
|
[
"49220598@qq.com"
] |
49220598@qq.com
|
0f7094b034b985bf56a41a69f249db48b9d49c8b
|
2d8ad2abcf35fa4cbaad865b651cdb6f0dcff88a
|
/ibitcy_tests/pages/payment_page.py
|
933d908a5025396ff39c0ae7bca05536466528d4
|
[] |
no_license
|
Raioln/ibitcy_tests
|
a5c5902c9690297649594ab22d84c08f47ce2b41
|
6972f7561a1c517949087b05da420880b7ed676e
|
refs/heads/master
| 2020-08-04T17:33:32.140471
| 2019-10-01T23:59:54
| 2019-10-01T23:59:54
| 212,221,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from selenium.webdriver.common.by import By
from pages.base_page import BasePage
from utils.locator import Locator
class PaymentPage(BasePage):
status_selector = Locator(By.CLASS_NAME, 'status-selector', 'Селектор статусов')
gold_item = Locator(By.CLASS_NAME, 'gold', 'Статус Gold')
|
[
"d.evlashkin@cian.ru"
] |
d.evlashkin@cian.ru
|
a78236e4cafcb2ac69887a145feeb786c907399e
|
6dda2ac01f624757069a9f9a7328b5a574a480c0
|
/week-04/day-04/11.py
|
0fac651072311c1af79f160402bc616d7a50041d
|
[] |
no_license
|
greenfox-zerda-lasers/brigittaforrai
|
976b8e0dacbf791a76e5e59c3f034cadd106b8e6
|
a2213ba268f2e777b1190a79d9ff0360f593cad5
|
refs/heads/master
| 2021-01-12T18:18:49.042219
| 2017-02-19T15:36:49
| 2017-02-19T15:36:49
| 71,362,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
from tkinter import *
root = Tk()
size = 600
canvas = Canvas(root,width=size, height=size, bg="yellow")
canvas.pack()
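# draw() renders one square of side `size` at (x, y) and recurses into four
# one-third-size squares (left, right, top and bottom of the centre) until size <= 5.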
def draw(x,y,size):
canvas.create_rectangle(x,y,x+size,y+size)
if size > 5:
draw(x,y+size/3,size/3)
draw(x+(size*(2/3)),y+size/3,size/3)
draw(x+size/3,y,size/3)
draw(x+size/3,y+(size*(2/3)),size/3)
draw(0,0,600)
root.mainloop()
|
[
"forraibrigi@gmail.com"
] |
forraibrigi@gmail.com
|
7030689c1007a648531f281ccdefe78c8ca50ba3
|
6abccf219d813a7d328c8fc351cba992e77fa18a
|
/utilities/teststatus.py
|
1c0996f1b51278c8a180533d21d7ffbb1aad6f08
|
[] |
no_license
|
thotha3/pythonProject
|
65bee0d9533590b44a9d884007d03dfe70e2509b
|
902f551430a43e6d3012145603acb728c67537b5
|
refs/heads/master
| 2023-08-08T01:40:32.882290
| 2021-09-16T20:26:29
| 2021-09-16T20:26:29
| 407,305,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
"""
@package utilities
Checkpoint class implementation
It provides functionality to assert the result
Example: self.check_point.markFinal("Test Name", result, "Message")
"""
import logging
from base.selenium_driver import SeleniumDriver
from utilities import custom_logger as cl
class TestStatus(SeleniumDriver):
log = cl.customLogger(logging.INFO)
def __init__(self, driver):
"""
Inits CheckPoint class
:param driver:
"""
super(TestStatus, self).__init__(driver)
self.resultList = []
def setResult(self, result, resultMessage):
try:
if result is not None:
if result:
self.resultList.append("PASS")
self.log.info('### VERIFICATION SUCCESSFUL :: ' + resultMessage)
else:
self.resultList.append("FAIL")
self.log.error('### VERIFICATION FAILED :: ' + resultMessage)
self.screenShot(resultMessage)
else:
self.resultList.append("FAIL")
self.log.info('### VERIFICATION FAILED :: ' + resultMessage)
self.screenShot(resultMessage)
except:
self.resultList.append("FAIL")
self.log.error('### EXCEPTION OCCURRED !!!')
self.screenShot(resultMessage)
def mark(self, result, resultMessage):
"""
Mark the result of the verification point in a test case
:param result:
:param resultMessage:
:return:
"""
self.setResult(result, resultMessage)
def markFinal(self, testName, result, resultMessage):
"""
        Mark the final result of the verification point in a test case
        This needs to be called at least once in a test case
        This should be the final test status of the test case
        :param testName:
:param result:
:param resultMessage:
:return:
"""
self.setResult(result, resultMessage)
if 'FAIL' in self.resultList:
self.log.error(testName + ' ### FAILED')
self.resultList.clear()
assert True == False
else:
            self.log.info(testName + ' ### PASSED')
self.resultList.clear()
assert True == True
|
[
"thotha3@hotmail.com"
] |
thotha3@hotmail.com
|
3c061683d05e01d2e49fdf44a9642b8ba3230d38
|
7942342d457276bb266228d0236af647b3d55477
|
/django/contrib/auth/__init__.pyi
|
24b49bc00c2f2782b020918d77e8d81ac3a388da
|
[
"MIT"
] |
permissive
|
AsymmetricVentures/mypy-django
|
847c4e521ce4dec9a10a1574f9c32b234dafd00b
|
f6e489f5cf5672ecede323132665ccc6306f50b8
|
refs/heads/master
| 2020-06-30T01:53:44.434394
| 2016-12-22T22:45:50
| 2016-12-22T22:45:50
| 74,397,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
pyi
|
# Stubs for django.contrib.auth (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional
from django.apps import apps as django_apps
from .signals import user_logged_in as user_logged_in, user_logged_out as user_logged_out, user_login_failed as user_login_failed
SESSION_KEY = ... # type: str
BACKEND_SESSION_KEY = ... # type: str
HASH_SESSION_KEY = ... # type: str
REDIRECT_FIELD_NAME = ... # type: str
def load_backend(path): ...
def get_backends(): ...
def authenticate(**credentials): ...
def login(request, user, backend: Optional[Any] = ...): ...
def logout(request): ...
def get_user_model(): ...
def get_user(request): ...
def get_permission_codename(action, opts): ...
def update_session_auth_hash(request, user): ...
default_app_config = ... # type: str
|
[
"reames@asymmetricventures.com"
] |
reames@asymmetricventures.com
|
6915ead1ba750b7569a4d25b34f4be68242230f5
|
a4133ac0cfce656b47fe2ea6161a9f1656afa0e8
|
/video.py
|
db4c55c6c781a82aeb60d2f364d3fcecfe4c2487
|
[] |
no_license
|
xHascox/Simple-HDR-Video
|
531d4b5baba2fd5ed2eac473484f65a54e318b86
|
aac2e6a1acfb6c69de214ac29bf6ba6892723886
|
refs/heads/main
| 2023-03-21T13:54:20.355709
| 2021-03-13T01:59:58
| 2021-03-13T01:59:58
| 346,901,991
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
import cv2
import tkinter
from tkinter.filedialog import askopenfilename
def play_videoFile(filePath,mirror=False):
cap = cv2.VideoCapture(filePath)
#modify:
width = 1920
height = 1080
#cv2.namedWindow('VideoHDR',cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('VideoHDR',cv2.WINDOW_NORMAL)
while True:
        ret_val, frame = cap.read()
        if not ret_val:
            break  # end of stream or read failure
        if mirror:
            frame = cv2.flip(frame, 1)
        cv2.imshow('VideoHDR', frame)
k = cv2.waitKey(1)
if k == 27:
break # esc to quit
if k == 32:
#space to pause
while cv2.waitKey(1) != 32:
pass
cv2.destroyAllWindows()
def main():
filename = askopenfilename(initialdir = "/",title = "Select file",filetypes = (("matroska files","*.mkv"),("all files","*.*")))
play_videoFile(filename,mirror=False)
if __name__ == '__main__':
main()
|
[
"mg.marco@hotmail.ch"
] |
mg.marco@hotmail.ch
|
16ac6d820543f041aa2c474fcb8afa4d895ce380
|
e3c9665e6c3b2a9a632ae00a3e896feb32cbb745
|
/foodgram/recipes/migrations/0020_auto_20210409_1023.py
|
52b73d6c650a816a113a40cd2b31ece2ea474ec9
|
[] |
no_license
|
girik108/foodgram-project
|
dc1addde0f99cf0ce74888119610c024ab5984c4
|
6f5b44da90563c25b9c7d66591244b85c7d63560
|
refs/heads/master
| 2023-04-10T00:35:44.916391
| 2021-04-19T06:26:31
| 2021-04-19T06:26:31
| 338,977,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Generated by Django 3.1.6 on 2021-04-09 06:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0019_auto_20210408_0937'),
]
operations = [
migrations.AlterField(
model_name='ingredient',
name='title',
field=models.CharField(max_length=100, verbose_name='Наименование'),
),
]
|
[
"gimatov@list.ru"
] |
gimatov@list.ru
|
b5e8c503a72c662e758f0301bb837a77098edce3
|
4e980eca143b2e3fd9523014d4a9e22a79089328
|
/pontuacoes/apps.py
|
80570d53af2bd7de854f3cd52a6728421acdcefa
|
[] |
no_license
|
silasgon/gamep-admin
|
5d1f9149c0a10260a93f4020108806df3b8c15de
|
9f3c9970b92dfb7254c4ccf081446303a25df8b9
|
refs/heads/master
| 2020-04-08T08:33:53.411726
| 2018-11-13T13:29:57
| 2018-11-13T13:29:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
from django.apps import AppConfig
class PontuacoesConfig(AppConfig):
name = 'pontuacoes'
|
[
"kavalerskialexandre@gmail.com"
] |
kavalerskialexandre@gmail.com
|
7f4785759eb9b5506425258ad834ea689dbb737f
|
3452e3335bce9dc6405175ea3b7d1a4bf75988dd
|
/core/creature/__init__.py
|
4ce294630de5da88e39eac68fba9f57f3ac62f54
|
[] |
no_license
|
mwerezak/arena
|
7480723b98f51aee259812b2890bdb1c08f201b9
|
31e27a9bdb83c9e9d28a1419d1dabdddf2906d82
|
refs/heads/master
| 2023-04-10T00:33:07.199527
| 2021-04-15T12:27:28
| 2021-04-15T12:27:28
| 358,059,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
from core.creature.creature import Creature
from core.constants import Stance
|
[
"mwerezak@gmail.com"
] |
mwerezak@gmail.com
|
ae63166e12243568d153ba12655979e284186b4d
|
4529f9b7a19536b01873bc23440f2192a98d3c50
|
/Easy/746_Min Cost Climbing Stairs.py
|
a1249d945a74fceb13cd93e8891a09e44754e11b
|
[] |
no_license
|
j611062000/leetcode
|
c6bf315ce682dc362ac5dcd856c30c2af1aad90c
|
cbaa63d4f094f58d48037119b60aed73edb166e5
|
refs/heads/master
| 2020-03-31T01:50:12.088992
| 2018-11-17T03:48:35
| 2018-11-17T03:48:35
| 151,796,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
"""
To construct the answer for n steps (i.e. P(n)), two scenarios are introduced to simplify
the calculation.
First (one step to the end): the minimal cost of this scenario is S(n-1) + X(n).
Second (two steps to the end): the minimal cost of this scenario is S(n-2) + X(n-1).
data = [X(1), X(2), ..., X(n-2), X(n-1), X(n)]
"""
class Solution(object):
def minCostClimbingStairs(self, cost):
"""
:type cost: List[int]
:rtype: int
"""
        # n_1 / n_2: minimal cost so far when ending on the last / second-to-last step
n_1 = cost[1]
n_2 = cost[0]
temp = None
for element in cost[2:]:
temp = n_1
n_1 = min(n_1, n_2) + element
n_2 = temp
return min(n_1, n_2)
if __name__ == "__main__":
data = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
sol = Solution().minCostClimbingStairs(data)
print(sol)
|
[
"j611062000@gmail.com"
] |
j611062000@gmail.com
|
48f499336b8be9120c3c86fe72d451b976c35f50
|
6a893f1219c1fc94b60f19c95596fabb1a18b241
|
/Assignment2/main.py
|
c6bb8b7a6adbe8b47c13ec8fcea92ea4b467ca11
|
[] |
no_license
|
WangZesen/DD2424-Assignment
|
3f4f30442578b7d11871da5c9d69b3fc797b6942
|
e1b284b5b0e7174dbcdf665402efb12cb696c36a
|
refs/heads/master
| 2020-03-11T21:37:18.077471
| 2018-04-19T20:52:43
| 2018-04-19T20:52:43
| 130,270,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,639
|
py
|
import random, math
import numpy as np
import copy as cp
import scipy.io as sio
import matplotlib.pyplot as plt
def activateRelu(input_data):
# input_data: d_in * N
output_data = cp.deepcopy(input_data)
output_data[output_data <= 0] *= 0.00 # change to 0.01 if it's leaky ReLU
return output_data
def fullyConnect(input_data, W, b):
# input_data: d_in * N
# W: d_out * d_in
# b: d_out * 1
assert input_data.shape[0] == W.shape[1]
assert W.shape[0] == b.shape[0]
output_data = np.dot(W, input_data) + b
return output_data
def softmax(input_data):
# input_data: K * N
output_data = np.exp(input_data)
for i in range(input_data.shape[1]):
output_data[:, i] = output_data[:, i] / sum(output_data[:, i])
return output_data
def crossEntropyLoss(output_data, label):
# input_data: K * N
# label: one-hot
assert output_data.shape == label.shape
out = - np.log(output_data)
out = np.multiply(out, label)
out = np.sum(out)
return out / output_data.shape[1]
def regularisationLoss(W, lambda_):
# W: d_out * d_in
loss = sum([np.sum(np.square(w)) for w in W]) * lambda_
return loss
def evaluateClassifierVerbose(X, W, b):
fc = []
act = []
last = X
fc.append(fullyConnect(X, W[0], b[0]))
act.append(activateRelu(fc[0]))
fc.append(fullyConnect(act[0], W[1], b[1]))
p = softmax(fc[1])
return fc, act, p
def evaluateClassifier(X, W, b):
fc = []
act = []
last = X
fc.append(fullyConnect(X, W[0], b[0]))
act.append(activateRelu(fc[0]))
fc.append(fullyConnect(act[0], W[1], b[1]))
p = softmax(fc[1])
return p
def computeLoss(X, Y, W, b, lambda_):
p = evaluateClassifier(X, W, b)
loss = crossEntropyLoss(p, Y) + regularisationLoss(W, lambda_)
return loss
def regularisationLossGradient(W, lambda_):
grad_W = []
for i in range(len(W)):
grad_W.append(2 * lambda_ * W[i])
return grad_W
def softmaxCrossEntropyLossGradient(p, Y):
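    # For softmax followed by cross-entropy, the gradient w.r.t. the logits simplifies to (p - y).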
return p - Y
def activationReluGradient(lastGrad, fc):
grad = cp.deepcopy(lastGrad)
grad[fc <= 0] *= 0.00 # change to 0.01 if it's leaky ReLU
return grad
def fullyConnectGradient(lastGrad, W):
return np.dot(W.T, lastGrad)
def computeGradient(X, Y, W, b, lambda_):
d = X.shape[0]
K = Y.shape[0]
m = 50
grad_W = [np.zeros((m, d)), np.zeros((K, m))]
grad_b = [np.zeros((m, 1)), np.zeros((K, 1))]
for i in range(X.shape[1]):
fc, act, p = evaluateClassifierVerbose(X[:, i : i+1], W, b)
grad = softmaxCrossEntropyLossGradient(p, Y[:, i : i+1])
# grad = activationReluGradient(grad, fc[1])
grad_W[1] = grad_W[1] + np.dot(grad, act[0].T)
grad_b[1] = grad_b[1] + grad
grad = fullyConnectGradient(grad, W[1])
grad = activationReluGradient(grad, fc[0])
grad_W[0] = grad_W[0] + np.dot(grad, X[:, i : i+1].T)
grad_b[0] = grad_b[0] + grad
grad_W[0] = grad_W[0] / X.shape[1]
grad_W[1] = grad_W[1] / X.shape[1]
grad_b[0] = grad_b[0] / X.shape[1]
grad_b[1] = grad_b[1] / X.shape[1]
grad_RW = regularisationLossGradient(W, lambda_)
grad_W[0] = grad_W[0] + grad_RW[0]
grad_W[1] = grad_W[1] + grad_RW[1]
return grad_W, grad_b
def computeGradsNumSlow(X, Y, W, b, lambda_, h):
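    # Numerical gradient check via central differences: dL/dw ≈ (L(w + h) - L(w - h)) / (2h).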
grad_W = [np.zeros(W[i].shape) for i in range(len(W))]
grad_b = [np.zeros(b[i].shape) for i in range(len(b))]
for k in range(len(W)):
for i in range(W[k].shape[0]):
for j in range(W[k].shape[1]):
W[k][i][j] -= h
c1 = computeLoss(X, Y, W, b, lambda_)
W[k][i][j] += h + h
c2 = computeLoss(X, Y, W, b, lambda_)
W[k][i][j] -= h
grad_W[k][i][j] = (c2 - c1) / (2 * h)
for i in range(b[k].shape[0]):
for j in range(b[k].shape[1]):
b[k][i][j] -= h
c1 = computeLoss(X, Y, W, b, lambda_)
b[k][i][j] += h + h
c2 = computeLoss(X, Y, W, b, lambda_)
b[k][i][j] -= h
grad_b[k][i][j] = (c2 - c1) / (2 * h)
return grad_W, grad_b
def computeAccuracy(X, y, W, b):
p = evaluateClassifier(X, W, b)
count = 0
for i in range(X.shape[1]):
if np.argmax(p[:, i]) == y[i]:
count = count + 1
return count / X.shape[1]
def miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = False, early_stop = False):
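    # Momentum SGD: each step keeps v = eta * grad + momentum * v_prev and applies W -= v,
    # with eta decayed by params['decay'] every params['decay_gap'] epochs.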
N = train_X.shape[1]
last_grad_W = [np.zeros(W[i].shape) for i in range(len(W))]
last_grad_b = [np.zeros(b[i].shape) for i in range(len(b))]
Wstar = cp.deepcopy(W)
bstar = cp.deepcopy(b)
Wbest = cp.deepcopy(W)
    bbest = cp.deepcopy(b)
best_acc = 0
best_epoch = 0
eta = params['eta']
train_loss = []
val_loss = []
for i in range(params['n_epochs']):
for j in range(N // params['n_batch']):
batch_X = train_X[:, j * params['n_batch'] : (j + 1) * params['n_batch']]
batch_Y = train_Y[:, j * params['n_batch'] : (j + 1) * params['n_batch']]
grad_W, grad_b = computeGradient(batch_X, batch_Y, Wstar, bstar, lambda_)
for k in range(len(W)):
grad_W[k] = eta * grad_W[k] + params['momentum'] * last_grad_W[k]
grad_b[k] = eta * grad_b[k] + params['momentum'] * last_grad_b[k]
Wstar[k] = Wstar[k] - grad_W[k]
bstar[k] = bstar[k] - grad_b[k]
last_grad_W = cp.deepcopy(grad_W)
last_grad_b = cp.deepcopy(grad_b)
if (i + 1) % params['decay_gap'] == 0:
eta = eta * params['decay']
if verbose:
train_loss.append(computeLoss(train_X, train_Y, Wstar, bstar, lambda_))
val_loss.append(computeLoss(val_X, val_Y, Wstar, bstar, lambda_))
val_acc = computeAccuracy(val_X, val_y, Wstar, bstar)
if val_acc > best_acc:
Wbest = cp.deepcopy(Wstar)
bbest = cp.deepcopy(bstar)
best_epoch = i
best_acc = val_acc
print ("Current Best Validation Accuracy at Epoch {}: {}".format(i + 1, best_acc))
elif (i - best_epoch > 10) and early_stop:
print ("Early stopping at epoch {}".format(i + 1))
return Wstar, bstar, train_loss, val_loss, Wbest, bbest
print ("Epoch {} Finished, Train Loss: {}, Validation Loss: {}".format(i + 1, train_loss[-1], val_loss[-1]))
if verbose:
return Wstar, bstar, train_loss, val_loss, Wbest, bbest
else:
return Wstar, bstar
def computeRelativeError(p1, p2):
eps = 1e-12
error = 0
for i in range(len(p1)):
absolute_error = np.abs(p1[i] - p2[i])
denominator = np.maximum(eps, np.abs(p1[i]) + np.abs(p2[i]))
error += np.sum(np.divide(absolute_error, denominator)) / p1[i].size
return error
def loadBatch(filename):
# Load mat file
content = sio.loadmat("Datasets/cifar-10-batches-mat/{}".format(filename))
X = content['data'].T / 255
mean = np.mean(X, axis = 1)
# X = (X.T - mean).T
y = content['labels']
y = np.reshape(y, (y.shape[0],))
Y = []
for i in range(X.shape[1]):
Y.append([0 for col in range(10)])
Y[i][y[i]] = 1
Y = np.array(Y).T
return X, Y, y, mean
def normalize(X, mean):
X = (X.T - mean).T
return X
def initial(K, d, t):
# Initialize paramters
m = 50
if t == "Gaussian":
W = [np.random.normal(0, 0.001, (m, d)), np.random.normal(0, 0.001, (K, m))]
b = [np.random.normal(0, 0.001, (m, 1)), np.random.normal(0, 0.001, (K, 1))]
elif t == "Xavier":
W = [np.random.normal(0, (2 / (m + d)) ** 0.5, (m, d)), np.random.normal(0, (2 / (K + m)) ** 0.5, (K, m))]
b = [np.random.normal(0.001, (2 / (m + d)) ** 0.5, (m, 1)), np.random.normal(0.001, (2 / (K + m)) ** 0.5, (K, 1))]
# b = [np.ones((m, 1)) * 0.01, np.ones((K, 1)) * 0.01]
elif t == "He":
W = [np.random.normal(0, (2 / d) ** 0.5, (m, d)), np.random.normal(0, (2 / m) ** 0.5, (K, m))]
b = [np.random.normal(0.001, (2 / d) ** 0.5, (m, 1)), np.random.normal(0.001, (2 / m) ** 0.5, (K, 1))]
else:
print ("Initialization Type Error!")
return W, b
if __name__ == "__main__":
np.random.seed(1)
train_X, train_Y, train_y, mean = loadBatch("data_batch_1.mat")
val_X, val_Y, val_y, mean_ = loadBatch("data_batch_2.mat")
test_X, test_Y, test_y, mean_ = loadBatch("test_batch.mat")
train_X = normalize(train_X, mean)
val_X = normalize(val_X, mean)
test_X = normalize(test_X, mean)
tasks = ["Task 1: Compute Relative Error",
"Task 2: Check Overfit",
"Task 3: Find the Best Momentum",
"Task 4: Find Reasonable Range for Eta",
"Task 5: Find the Best Eta and Lambda",
"Task 6: Train the Network",
"Task 7 (Optional): Optimize the performance"]
task_label = input("\n".join(tasks) + "\nTask #: ")
if task_label == "1":
train_X = train_X[1:400, :]
d = train_X.shape[0]
K = train_Y.shape[0]
W, b = initial(K, d, "Gaussian")
lambda_ = 0.1
grad_W, grad_b = computeGradient(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_)
grad_W1, grad_b1 = computeGradsNumSlow(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_, 1e-6)
print ("Relative Error for W (lambda = 0.1): ", computeRelativeError([grad_W[1]], [grad_W1[1]]))
print ("Relative Error for b (lambda = 0.1): ", computeRelativeError(grad_b, grad_b1))
lambda_ = 0
grad_W, grad_b = computeGradient(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_)
grad_W1, grad_b1 = computeGradsNumSlow(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_, 1e-6)
print ("Relative Error for W (lambda = 0): ", computeRelativeError([grad_W[1]], [grad_W1[1]]))
print ("Relative Error for b (lambda = 0): ", computeRelativeError(grad_b, grad_b1))
if task_label == "2":
d = train_X.shape[0]
K = train_Y.shape[0]
W, b = initial(K, d, "Gaussian")
lambda_ = 0
train_X = train_X[:, 0:100]
train_Y = train_Y[:, 0:100]
train_y = train_y[0:100]
params = {
'n_batch': 100,
'n_epochs': 200,
'eta': 5e-2,
'momentum': 0,
'decay': 1,
'decay_gap': 1
}
x = [i + 1 for i in range(params['n_epochs'])]
Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
plt.plot(x, train_loss, label = "train")
plt.plot(x, val_loss, label = "validation")
plt.legend()
plt.show()
if task_label == "3":
d = train_X.shape[0]
K = train_Y.shape[0]
W, b = initial(K, d, "Gaussian")
lambda_ = 1e-6
params = {
'n_batch': 100,
'n_epochs': 10,
'eta': 1e-2,
'momentum': 0.9,
'decay': 0.95,
'decay_gap': 1
}
x = [i + 1 for i in range(params['n_epochs'])]
for m in [0, 0.5, 0.9, 0.95, 0.99]:
params['momentum'] = m
Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
plt.plot(x, train_loss, label = 'rho = {} (train)'.format(m))
print ("Momentum = {}".format(m))
print ("Accuracy on Test Set: {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
plt.legend()
plt.show()
if task_label == "4":
d = train_X.shape[0]
K = train_Y.shape[0]
W, b = initial(K, d, "Gaussian")
lambda_ = 1e-6
params = {
'n_batch': 100,
'n_epochs': 5,
'eta': 1e-2,
'momentum': 0.95,
'decay': 0.95,
'decay_gap': 1
}
x = [i + 1 for i in range(params['n_epochs'])]
for m in range(5):
params['eta'] = 5e-3 + 2e-2 * m
Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
plt.plot(x, train_loss, label = 'eta = {} (train)'.format(params['eta']))
print ("Learning Rate = {}".format(params['eta']))
print ("Accuracy on Test Set: {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
plt.legend()
plt.show()
pass
if task_label == "5":
d = train_X.shape[0]
K = train_Y.shape[0]
W, b = initial(K, d, "Gaussian")
lambda_e_min = -8
lambda_e_max = -2
eta_e_min = math.log(0.001) / math.log(10)
eta_e_max = math.log(0.040) / math.log(10)
params = {
'n_batch': 100,
'n_epochs': 10,
'eta': 0,
'momentum': 0.95,
'decay': 0.95,
'decay_gap': 1
}
lambdas = []
etas = []
results = []
exp_time = 160
f = open("lambda_eta_select.txt", "w")
for i in range(exp_time):
lambda_ = 10 ** (lambda_e_min + random.uniform(0, 1) * (lambda_e_max - lambda_e_min))
params['eta'] = 10 ** (eta_e_min + random.uniform(0, 1) * (eta_e_max - eta_e_min))
Wstar, bstar = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params)
results.append(computeAccuracy(val_X, val_y, Wstar, bstar))
lambdas.append(lambda_)
etas.append(params['eta'])
print ("Lambda = {}, Eta = {}, Accuracy = {}".format(lambda_, params['eta'], results[-1]))
results = list(zip(results, lambdas, etas))
results.sort(key = lambda x: -x[0])
for i in range(min(exp_time, 500)):
f.write("Accuracy: {}, lambda: {}, eta: {}\n".format(results[i][0], results[i][1], results[i][2]))
f.close()
if task_label == "6":
train_X, train_Y, train_y, mean_ = loadBatch("data_batch_1.mat")
test_X, test_Y, test_y, mean_ = loadBatch("test_batch.mat")
for i in range(1, 5):
tem_X, tem_Y, tem_y, mean_ = loadBatch("data_batch_{}.mat".format(i + 1))
train_X = np.concatenate((train_X, tem_X), axis = 1)
train_Y = np.concatenate((train_Y, tem_Y), axis = 1)
train_y = np.concatenate((train_y, tem_y))
val_X = train_X[:, 0:1000]
val_Y = train_Y[:, 0:1000]
val_y = train_y[0:1000]
print (val_X.shape, val_Y.shape, val_y.shape)
train_X = train_X[:, 1000:]
train_Y = train_Y[:, 1000:]
train_y = train_y[1000:]
mean = np.mean(train_X, axis = 1)
train_X = normalize(train_X, mean)
val_X = normalize(val_X, mean)
test_X = normalize(test_X, mean)
d = train_X.shape[0]
K = train_Y.shape[0]
W, b = initial(K, d, "Gaussian")
params = {
'n_batch': 100,
'n_epochs': 30,
'eta': 0.017453577972249945, # 0.010800662290914505,
'momentum': 0.95,
'decay': 0.95,
'decay_gap': 1
}
lambda_ = 0.0023292248102687557 # 0.002963774526491722
Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
x = [i + 1 for i in range(params['n_epochs'])]
plt.plot(x, train_loss, label = 'train')
plt.plot(x, val_loss, label = 'val')
print ("Accuracy on test set (final): {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
print ("Accuracy on test set (best): {}".format(computeAccuracy(test_X, test_y, Wbest, bbest)))
plt.legend()
plt.show()
if task_label == "7":
train_X, train_Y, train_y, mean_ = loadBatch("data_batch_1.mat")
test_X, test_Y, test_y, mean_ = loadBatch("test_batch.mat")
for i in range(1, 5):
tem_X, tem_Y, tem_y, mean_ = loadBatch("data_batch_{}.mat".format(i + 1))
train_X = np.concatenate((train_X, tem_X), axis = 1)
train_Y = np.concatenate((train_Y, tem_Y), axis = 1)
train_y = np.concatenate((train_y, tem_y))
val_X = train_X[:, 0:1000]
val_Y = train_Y[:, 0:1000]
val_y = train_y[0:1000]
print (val_X.shape, val_Y.shape, val_y.shape)
train_X = train_X[:, 1000:]
train_Y = train_Y[:, 1000:]
train_y = train_y[1000:]
mean = np.mean(train_X, axis = 1)
train_X = normalize(train_X, mean)
val_X = normalize(val_X, mean)
test_X = normalize(test_X, mean)
d = train_X.shape[0]
K = train_Y.shape[0]
W, b = initial(K, d, "He")
params = {
'n_batch': 100,
'n_epochs': 50,
'eta': 0.017453577972249945, # 0.010800662290914505,
'momentum': 0.95,
'decay': 0.1,
'decay_gap': 8,
}
lambda_ = 0.0023292248102687557 # 0.002963774526491722
Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b,
lambda_, params, verbose = True, early_stop = True)
x = [i + 1 for i in range(len(train_loss))]
plt.plot(x, train_loss, label = 'train')
plt.plot(x, val_loss, label = 'val')
print ("Accuracy on test set (final): {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
print ("Accuracy on test set (best): {}".format(computeAccuracy(test_X, test_y, Wbest, bbest)))
plt.legend()
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
1b36b1e22e63bb7817827b4a02f3f2d9c90b4691
|
49c0056ccde2d893e56e2f15c24b19659312c073
|
/blog/migrations/0005_auto_20210112_2004.py
|
ccb3308cd5c2343ac991a08fc6f24b7a56ea450f
|
[] |
no_license
|
ferdousdjango/blogdupl
|
5f5c1ed140fac0060584c7344e6b7e6403b23a06
|
3171566cddfb6e231079f03da5f2c308891e982e
|
refs/heads/main
| 2023-02-27T20:06:55.151176
| 2021-02-03T15:20:17
| 2021-02-03T15:20:17
| 333,327,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
# Generated by Django 3.1.4 on 2021-01-12 14:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_postright'),
]
operations = [
migrations.AddField(
model_name='post',
name='homeimage',
field=models.ImageField(blank=True, max_length=300, upload_to='media'),
),
migrations.AddField(
model_name='post',
name='hometitle',
field=models.CharField(blank=True, max_length=155),
),
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(blank=True, max_length=300, upload_to='media'),
),
]
|
[
"helloferdous@gmail.com"
] |
helloferdous@gmail.com
|
772770f9242c44fcce1f2f8a76f0f56cd8a222fb
|
a29c96b6fc4942b519edcd7157d42f34add78feb
|
/horovod/spark/keras/estimator.py
|
9be8b9bd942d3460316ce5d4764fdfb3ce636617
|
[
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
xielm12/horovod
|
5662456cd3626ba3f9fed426bbee1901f1a27014
|
32e5fdbf33fb2dac9d725028a886a093984c3618
|
refs/heads/master
| 2022-12-25T20:10:39.771600
| 2020-09-22T22:01:47
| 2020-09-22T22:01:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,429
|
py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import horovod.spark.common._namedtuple_fix
import numbers
import time
from distutils.version import LooseVersion
import numpy as np
import tensorflow as tf
from pyspark import keyword_only
from pyspark.ml.util import MLWritable, MLReadable
from pyspark.ml.param.shared import Param, Params
from horovod.runner.common.util import codec
from horovod.spark.common import util
from horovod.spark.common.estimator import HorovodEstimator, HorovodModel
from horovod.spark.common.params import EstimatorParams
from horovod.spark.common.serialization import HorovodParamsWriter, HorovodParamsReader
from horovod.spark.keras import remote
from horovod.spark.keras.util import \
BARE_KERAS, TF_KERAS, \
BareKerasUtil, TFKerasUtil, \
is_instance_of_bare_keras_model, is_instance_of_bare_keras_optimizer
class KerasEstimatorParamsWriter(HorovodParamsWriter):
def saveImpl(self, path):
keras_utils = self.instance._get_keras_utils()
# Write the parameters
HorovodParamsWriter.saveMetadata(self.instance, path, self.sc,
param_serializer_fn=keras_utils.serialize_param_value)
class KerasEstimatorParamsWritable(MLWritable):
def write(self):
return KerasEstimatorParamsWriter(self)
class KerasEstimatorParamsReader(HorovodParamsReader):
def _deserialize_dict(self, dict):
def _param_deserializer_fn(name, param_val, keras_utils, custom_objects):
if param_val is None:
return param_val
if name == EstimatorParams.model.name:
def load_model_fn(x):
with keras_utils.keras().utils.custom_object_scope(custom_objects):
return keras_utils.keras().models.load_model(x, compile=True)
return keras_utils.deserialize_model(param_val,
load_model_fn=load_model_fn)
elif name == KerasEstimator.optimizer.name:
opt_base64_encoded = codec.loads_base64(param_val)
return keras_utils.deserialize_optimizer(opt_base64_encoded)
else:
return codec.loads_base64(param_val)
# In order to deserialize the model, we need to deserialize the custom_objects param
# first.
keras_utils = None
if KerasEstimator._keras_pkg_type.name in dict:
keras_pkg_type = _param_deserializer_fn(KerasEstimator._keras_pkg_type.name,
dict[KerasEstimator._keras_pkg_type.name],
None, None)
if keras_pkg_type == BARE_KERAS:
keras_utils = BareKerasUtil
elif keras_pkg_type == TF_KERAS:
keras_utils = TFKerasUtil
custom_objects = {}
if KerasEstimator.custom_objects.name in dict:
custom_objects = _param_deserializer_fn(KerasEstimator.custom_objects.name,
dict[KerasEstimator.custom_objects.name],
None, None)
for key, val in dict.items():
dict[key] = _param_deserializer_fn(key, val, keras_utils, custom_objects)
return dict
class KerasEstimatorParamsReadable(MLReadable):
@classmethod
def read(cls):
"""Returns a KerasEstimatorParamsReader instance for this class."""
return KerasEstimatorParamsReader(cls)
class KerasEstimator(HorovodEstimator, KerasEstimatorParamsReadable,
KerasEstimatorParamsWritable):
"""Spark Estimator for fitting Keras models to a DataFrame.
Supports standalone `keras` and `tf.keras`, and TensorFlow 1.X and 2.X.
Args:
num_proc: Number of Horovod processes. Defaults to `spark.default.parallelism`.
model: Keras model to train.
backend: Optional Backend object for running distributed training function. Defaults to SparkBackend with
`num_proc` worker processes. Cannot be specified if `num_proc` is also provided.
store: Store object that abstracts reading and writing of intermediate data and run results.
custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered
during serialization/deserialization.
optimizer: Keras optimizer to be converted into a `hvd.DistributedOptimizer` for training.
loss: Keras loss or list of losses.
loss_weights: Optional list of float weight values to assign each loss.
sample_weight_col: Optional column indicating the weight of each sample.
gradient_compression: Gradient compression used by `hvd.DistributedOptimizer`.
metrics: Optional metrics to record.
feature_cols: Column names used as feature inputs to the model. Must be a list with each feature
mapping to a sequential argument in the model's forward() function.
label_cols: Column names used as labels. Must be a list with one label for each output of the model.
validation: Optional validation column name (string) where every row in the column is either 1/True or 0/False,
or validation split (float) giving percent of data to be randomly selected for validation.
callbacks: Keras callbacks.
batch_size: Number of rows from the DataFrame per batch.
epochs: Number of epochs to train.
verbose: Verbosity level [0, 2] (default: 1).
shuffle_buffer_size: Optional size of in-memory shuffle buffer in rows. Allocating a larger buffer size
increases randomness of shuffling at the cost of more host memory. Defaults to estimating
with an assumption of 4GB of memory per host.
partitions_per_process: Number of Parquet partitions to assign per worker process from `num_proc` (default: 10).
run_id: Optional unique ID for this run for organization in the Store. Will be automatically assigned if not
provided.
train_steps_per_epoch: Number of steps to train each epoch. Useful for testing that model trains successfully.
Defaults to training the entire dataset each epoch.
validation_steps_per_epoch: Number of validation steps to perform each epoch.
transformation_fn: Optional function that takes a row as its parameter
and returns a modified row that is then fed into the
train or validation step. This transformation is
applied after batching. See Petastorm [TransformSpec](https://github.com/uber/petastorm/blob/master/petastorm/transform.py)
for more details. Note that this function constructs
another function which should perform the
transformation.
train_reader_num_workers: This parameter specifies the number of parallel processes that
read the training data from data store and apply data
transformations to it. Increasing this number
will generally increase the reading rate but will also
increase the memory footprint. More processes are
particularly useful if the bandwidth to the data store is not
high enough, or users need to apply transformation such as
decompression or data augmentation on raw data.
val_reader_num_workers: Similar to the train_reader_num_workers.
"""
custom_objects = Param(Params._dummy(), 'custom_objects', 'custom objects')
_keras_pkg_type = Param(Params._dummy(), '_keras_pkg_type', 'keras package type')
checkpoint_callback = Param(Params._dummy(), 'checkpoint_callback',
'model checkpointing callback')
@keyword_only
def __init__(self,
num_proc=None,
model=None,
backend=None,
store=None,
custom_objects=None,
optimizer=None,
loss=None,
loss_weights=None,
sample_weight_col=None,
gradient_compression=None,
metrics=None,
feature_cols=None,
label_cols=None,
validation=None,
callbacks=None,
batch_size=None,
epochs=None,
verbose=None,
shuffle_buffer_size=None,
partitions_per_process=None,
run_id=None,
train_steps_per_epoch=None,
validation_steps_per_epoch=None,
transformation_fn=None,
train_reader_num_workers=None,
val_reader_num_workers=None,
label_shapes=None,
checkpoint_callback=None):
super(KerasEstimator, self).__init__()
self._setDefault(optimizer=None,
custom_objects={},
_keras_pkg_type=None,
checkpoint_callback=None)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _get_keras_utils(self):
# This function determines the keras package type of the Estimator based on the passed
# optimizer and model and updates _keras_pkg_type parameter.
model_type = None
model = self.getModel()
if model:
if isinstance(model, tf.keras.Model):
model_type = TF_KERAS
elif is_instance_of_bare_keras_model(model):
model_type = BARE_KERAS
else:
raise ValueError(
"model has to be an instance of tensorflow.keras.Model or keras.Model")
optimizer_type = None
optimizer = self.getOptimizer()
if optimizer:
if isinstance(optimizer, str):
optimizer_type = None
elif isinstance(optimizer, tf.keras.optimizers.Optimizer):
optimizer_type = TF_KERAS
elif is_instance_of_bare_keras_optimizer(optimizer):
optimizer_type = BARE_KERAS
else:
raise ValueError("invalid optimizer type")
types = set([model_type, optimizer_type])
types.discard(None)
if len(types) > 1:
raise ValueError('mixed keras and tf.keras values for optimizers and model')
elif len(types) == 1:
pkg_type = types.pop()
super(KerasEstimator, self)._set(_keras_pkg_type=pkg_type)
if pkg_type == TF_KERAS:
return TFKerasUtil
elif pkg_type == BARE_KERAS:
return BareKerasUtil
else:
raise ValueError("invalid keras type")
def setCustomObjects(self, value):
return self._set(custom_objects=value)
def getCustomObjects(self):
return self.getOrDefault(self.custom_objects)
def setCheckpointCallback(self, value):
return self._set(checkpoint_callback=value)
def getCheckpointCallback(self):
return self.getOrDefault(self.checkpoint_callback)
def _check_metadata_compatibility(self, metadata):
input_shapes, output_shapes = self.get_model_shapes()
util.check_shape_compatibility(metadata,
self.getFeatureCols(),
self.getLabelCols(),
input_shapes=input_shapes,
output_shapes=output_shapes,
label_shapes=self.getLabelShapes())
def get_model_shapes(self):
model = self.getModel()
input_shapes = [[dim if dim else -1 for dim in input.shape.as_list()]
for input in model.inputs]
output_shapes = [[dim if dim else -1 for dim in output.shape.as_list()]
for output in model.outputs]
return input_shapes, output_shapes
def _fit_on_prepared_data(self, backend, train_rows, val_rows, metadata, avg_row_size, dataset_idx=None):
self._check_params(metadata)
keras_utils = self._get_keras_utils()
run_id = self.getRunId()
if run_id is None:
run_id = 'keras_' + str(int(time.time()))
if self._has_checkpoint(run_id):
serialized_model = self._load_model_from_checkpoint(run_id)
else:
serialized_model = self._compile_model(keras_utils)
# Workaround:
# https://stackoverflow.com/questions/50583056/is-there-any-way-to-set-java-opts-for-tensorflow-process/50615570
env = {'LIBHDFS_OPTS': '-Xms2048m -Xmx2048m'}
trainer = remote.RemoteTrainer(self, metadata, keras_utils, run_id, dataset_idx)
handle = backend.run(trainer,
args=(serialized_model, train_rows, val_rows, avg_row_size),
env=env)
return self._create_model(handle, run_id, metadata)
def _load_model_from_checkpoint(self, run_id):
store = self.getStore()
last_ckpt_path = store.get_checkpoint_path(run_id)
if self.getVerbose():
print('Resuming training from last checkpoint: {}'.format(last_ckpt_path))
return store.read_serialized_keras_model(last_ckpt_path, self.getModel())
def _compile_model(self, keras_utils):
# Compile the model with all the parameters
model = self.getModel()
loss = self.getLoss()
loss_weights = self.getLossWeights()
if not loss:
raise ValueError('Loss parameter is required for the model to compile')
optimizer = self.getOptimizer()
if not optimizer:
optimizer = model.optimizer
if not optimizer:
raise ValueError('Optimizer must be provided either as a parameter or as part of a '
'compiled model')
metrics = self.getMetrics()
gradient_compression = self.getGradientCompression()
optimizer_weight_values = optimizer.get_weights()
dist_optimizer_args = dict(optimizer=optimizer)
if gradient_compression:
dist_optimizer_args['compression'] = gradient_compression
# Horovod: wrap optimizer with DistributedOptimizer.
dist_optimizer = keras_utils.get_horovod().DistributedOptimizer(**dist_optimizer_args)
model.compile(optimizer=dist_optimizer,
loss=loss,
loss_weights=loss_weights,
metrics=metrics)
if optimizer_weight_values:
model.optimizer.set_weights(optimizer_weight_values)
return keras_utils.serialize_model(model)
def _create_model(self, run_results, run_id, metadata):
keras_utils = self._get_keras_utils()
keras_module = keras_utils.keras()
floatx = keras_module.backend.floatx()
custom_objects = self.getCustomObjects()
history, serialized_model, hvd_size = run_results[0]
def load_model_fn(x):
with keras_module.utils.custom_object_scope(custom_objects):
return keras_module.models.load_model(x)
model = keras_utils.deserialize_model(serialized_model, load_model_fn=load_model_fn)
# Here, learning rate is scaled down with the number of horovod workers.
# This is important for retraining the model. The user may retrain the model with a
# different number of workers, and we need the raw learning rate to adjust to the
# new number of workers.
scaled_lr = keras_module.backend.get_value(model.optimizer.lr)
keras_module.backend.set_value(model.optimizer.lr, scaled_lr / hvd_size)
return self.get_model_class()(**self._get_model_kwargs(
model, history, run_id, metadata, floatx))
def get_model_class(self):
return KerasModel
def _get_model_kwargs(self, model, history, run_id, metadata, floatx):
return dict(history=history,
model=model,
feature_columns=self.getFeatureCols(),
label_columns=self.getLabelCols(),
custom_objects=self.getCustomObjects(),
run_id=run_id,
_metadata=metadata,
_floatx=floatx)
class KerasModel(HorovodModel, KerasEstimatorParamsReadable,
KerasEstimatorParamsWritable):
"""Spark Transformer wrapping a Keras model, used for making predictions on a DataFrame.
Retrieve the underlying Keras model by calling `keras_model.getModel()`.
Args:
history: List of metrics, one entry per epoch during training.
model: Trained Keras model.
feature_columns: List of feature column names.
label_columns: List of label column names.
custom_objects: Keras custom objects.
run_id: ID of the run used to train the model.
"""
custom_objects = Param(Params._dummy(), 'custom_objects', 'custom objects')
# Setting _keras_pkg_type parameter helps us determine the type of keras package during
# deserializing the transformer
_keras_pkg_type = Param(Params._dummy(), '_keras_pkg_type', 'keras package type')
_floatx = Param(Params._dummy(), '_floatx', 'keras default float type')
@keyword_only
def __init__(self,
history=None,
model=None,
feature_columns=None,
label_columns=None,
custom_objects=None,
run_id=None,
_metadata=None,
_floatx=None):
super(KerasModel, self).__init__()
if label_columns:
self.setOutputCols([col + '__output' for col in label_columns])
self._setDefault(custom_objects={})
kwargs = self._input_kwargs
self.setParams(**kwargs)
def setCustomObjects(self, value):
return self._set(custom_objects=value)
def getCustomObjects(self):
return self.getOrDefault(self.custom_objects)
def _get_keras_utils(self, model=None):
# infer keras package from model
model = self.getModel()
if model:
if isinstance(model, tf.keras.Model):
pkg_type = TF_KERAS
elif is_instance_of_bare_keras_model(model):
pkg_type = BARE_KERAS
else:
raise ValueError(
"model has to be an instance of tensorflow.keras.Model or keras.Model")
super(KerasModel, self)._set(_keras_pkg_type=pkg_type)
if pkg_type == TF_KERAS:
return TFKerasUtil
elif pkg_type == BARE_KERAS:
return BareKerasUtil
else:
raise ValueError("invalid keras type")
raise ValueError("model is not set")
def _get_floatx(self):
return self.getOrDefault(self._floatx)
# To run locally on OS X, need export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
def _transform(self, df):
keras_utils = self._get_keras_utils()
floatx = self._get_floatx()
serialized_model = keras_utils.serialize_model(self.getModel())
label_cols = self.getLabelColumns()
output_cols = self.getOutputCols()
feature_cols = self.getFeatureColumns()
custom_objects = self.getCustomObjects()
metadata = self._get_metadata()
pin_cpu = remote._pin_cpu_fn()
def predict(rows):
import tensorflow as tf
from pyspark import Row
from pyspark.ml.linalg import DenseVector, SparseVector
k = keras_utils.keras()
k.backend.set_floatx(floatx)
# Do not use GPUs for prediction, use single CPU core per task.
pin_cpu(tf, k)
def load_model_fn(x):
with k.utils.custom_object_scope(custom_objects):
return k.models.load_model(x)
model = keras_utils.deserialize_model(serialized_model,
load_model_fn=load_model_fn)
input_shapes = [[dim if dim else -1 for dim in input.shape.as_list()]
for input in model.inputs]
def to_array(item):
# both DenseVector and SparseVector expose toArray()
if type(item) in [DenseVector, SparseVector]:
return item.toArray()
else:
return np.array(item)
def to_numpy(item):
# Some versions of TensorFlow will return an EagerTensor
return item.numpy() if hasattr(item, 'numpy') else item
# Perform predictions.
for row in rows:
fields = row.asDict().copy()
preds = model.predict_on_batch(
[to_array(row[feature_cols[i]]).reshape(input_shapes[i])
for i in range(len(feature_cols))])
preds = [to_numpy(item) for item in preds]
for label_col, output_col, pred in zip(label_cols, output_cols, preds):
meta = metadata[label_col]
col_type = meta['spark_data_type']
# dtype for DenseVector and SparseVector is always np.float64
if col_type == DenseVector:
shape = np.prod(pred.shape)
flattened_pred = pred.reshape(shape, )
field = DenseVector(flattened_pred)
elif col_type == SparseVector:
shape = meta['shape']
flattened_pred = pred.reshape(shape, )
nonzero_indices = flattened_pred.nonzero()[0]
field = SparseVector(shape, nonzero_indices,
flattened_pred[nonzero_indices])
else:
# If the column is scalar type, int, float, etc.
value = pred[0]
python_type = util.spark_scalar_to_python_type(col_type)
if issubclass(python_type, numbers.Integral):
value = round(value)
field = python_type(value)
fields[output_col] = field
yield Row(**fields)
return df.rdd.mapPartitions(predict).toDF()
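# Minimal usage sketch (illustrative only, appended as a comment; `train_df`, `test_df` and the store
# path are placeholders, and the tiny model/optimizer below are arbitrary examples, not part of this module):
#
#   import tensorflow as tf
#   import horovod.spark.keras as hvd_keras
#   from horovod.spark.common.store import Store
#
#   store = Store.create('/tmp/horovod_runs')            # hypothetical local run/checkpoint store
#   model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
#   estimator = hvd_keras.KerasEstimator(
#       num_proc=2,
#       store=store,
#       model=model,
#       optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
#       loss='mse',
#       feature_cols=['features'],
#       label_cols=['label'],
#       batch_size=32,
#       epochs=5)
#   keras_model = estimator.fit(train_df)                # returns a KerasModel transformer
#   predictions = keras_model.transform(test_df)         # adds a '<label>__output' column per label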
|
[
"noreply@github.com"
] |
noreply@github.com
|
0405898d24af93f463de789847b0398a0e8e0b97
|
092d82f8a64f8e33a739ae023667253a75bfb9ae
|
/jury/forms.py
|
ac08bc91b6d6b266345bc9fb2f865acbf50bba23
|
[
"MIT"
] |
permissive
|
COdingaorg/The_Jury
|
8c103eec028891b1ee98ede786fb54638bd16ba6
|
a4432269a023edf49a010644ca4f06324a934d7f
|
refs/heads/main
| 2023-06-18T10:43:14.888503
| 2021-07-20T16:05:59
| 2021-07-20T16:05:59
| 386,658,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from jury.models import UserProfile, UserProject
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class registerUser(UserCreationForm):
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email', 'password1', 'password2']
class UploadProjectForm(forms.ModelForm):
class Meta:
model = UserProject
fields = ['project_title', 'project_image', 'project_description', 'project_link']
class AddorEditProfile(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['photo_path', 'user_bio', 'facebook_account', 'twitter_account', 'instagram_account']
|
[
"calemasanga@gmail.com"
] |
calemasanga@gmail.com
|
943f9a56f01dbd5d3da769e1bca8d7b26ee4f82a
|
cec2ba69ce9cb84f05097a135a64497852016c45
|
/Battleship.py
|
d4d2a82a071cfca0d63d5249e42ee1d6f3457a4d
|
[] |
no_license
|
EthanTaft/PythonLearning
|
22d11f7b37c7f6069e90f5edcf174cdc86b15664
|
8947b576f5045bcaa705d9d270fcc9a5c7f20640
|
refs/heads/master
| 2021-08-20T09:32:51.899628
| 2017-11-28T20:33:07
| 2017-11-28T20:33:07
| 112,286,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:19:41 2017
@author: Ethan
"""
from random import randint
board = []
for i in range(5):
board.append(['O', 'O', 'O', 'O', 'O'])
print(board)
def print_board(board_in):
for row in board_in:
print(" ".join(row))
print_board(board)
def random_row(board_in):
return(randint(0, len(board_in) - 1))
def random_col(board_in):
return(randint(0, len(board_in) - 1))
ship_row = random_row(board)
ship_col = random_col(board)
for turn in range(4):
print("Turn", turn + 1)
guess_row = int(input("Guess Row: "))
guess_col = int(input("Guess Col: "))
if guess_row == ship_row and guess_col == ship_col:
print("Congratulations! you sank my battleship!")
break
else:
if guess_row not in range(5) or guess_col not in range(5):
print("Oops, that's not even in the ocean.")
elif board[guess_row][guess_col] == "X":
print("You guessed that one already.")
else:
print("You missed my battleship!")
board[guess_row][guess_col] = "X"
print_board(board)
if turn == 3:
print("Game Over")
|
[
"ethan.taft@healthcatalyst.com"
] |
ethan.taft@healthcatalyst.com
|
cc42ed3292ae011c58c3f52d8268253828b8b0f6
|
97e764ca8ee0ef7c1943b97b736f3b7190170787
|
/Regression_Problem/PearsonCorrelation.py
|
3ff6d34eda4c477d65751fd523b6513098b32695
|
[
"MIT"
] |
permissive
|
xinpengliu/Machine-Learning-Practice
|
2aa7b82216e5a4506a2cd191cc57d3d4c55f0d86
|
dae55f52bb31f428526d6d60229bd1827c4e0af0
|
refs/heads/master
| 2020-03-14T00:35:33.942020
| 2017-07-20T05:54:21
| 2017-07-20T05:54:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
'''
Created on Apr 24, 2017
@author: Leo Zhong
'''
import numpy as np
from astropy.units import Ybarn
import math
def computeCorrelation(X, Y):
xBar = np.mean(X)
yBar = np.mean(Y)
SSR = 0
varX = 0
varY = 0
for i in range(0 , len(X)):
diffXXBar = X[i] - xBar
diffYYBar = Y[i] - yBar
SSR += (diffXXBar * diffYYBar)
varX += diffXXBar**2
varY += diffYYBar**2
SST = math.sqrt(varX * varY)
return SSR / SST
def polyfit(x,y,degree):
result={}
coffs = np.polyfit(x, y, degree)
#polynomial cofficient
result['polynomial']=coffs.tolist()
#r-squared
p=np.poly1d(coffs)
yhat=p(x)
ybar=np.sum(y)/len(y)
ssreg=np.sum((yhat-ybar)**2)
sstot=np.sum((y-ybar)**2)
result['determination']=ssreg/sstot
return result
testX = [1, 3, 8, 7, 9]
testY = [10, 12, 24, 21, 34]
print (computeCorrelation(testX, testY))
print (polyfit(testX, testY, 1))
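# Illustrative check (added sketch, not part of the original script): for a degree-1 fit, the
# 'determination' (R^2) returned by polyfit should equal the squared Pearson correlation.
r = computeCorrelation(testX, testY)
print (abs(r ** 2 - polyfit(testX, testY, 1)['determination']) < 1e-9)  # expected: True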
|
[
"zhong5930@gmail.com"
] |
zhong5930@gmail.com
|
5404e3ad8934d8abdd386447c64ee0c0a8c716f7
|
93f5ee5cc7b863029c54a766e9f5fa0b0e52191f
|
/BayesianOptimization/20180403_two_hparas.py
|
f2c660d6aa1078720adfdb30d305f189ed7051c7
|
[] |
no_license
|
ShihPingLai/Jacob-deep_learning
|
29ad17839da7a34e01db1a626942862e250e8619
|
dfbaa178ac537a189a062a23904072a7d8e550a9
|
refs/heads/master
| 2020-03-13T11:51:51.276939
| 2018-04-26T04:19:15
| 2018-04-26T04:19:15
| 131,108,620
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,156
|
py
|
#!/usr/bin/python3
'''
Abstract:
This is a practice program for learning how to optimize deep-learning hyperparameters with Bayesian Optimization (exploitation vs. exploration).
Copied from "BayesianOptimization/examples/exploitation vs exploration.ipynb"
Usage:
20180403_two_hparas.py
Source:
BayesianOptimization/examples/exploitation vs exploration.ipynb
##################################
# Python3 #
# This code is made in python3 #
##################################
20170403
####################################
update log
20180403 version alpha 1:
1. I don't know
'''
# modules for Bayesian
from bayes_opt import BayesianOptimization
import pymc as pm
# modules for deep learning
import tensorflow as tf
# common modules
import numpy as np
import matplotlib.pyplot as plt
import time
from IPython.core.pylabtools import figsize
# Utility function for plotting
def plot_bo(f, bo, figname):
xs = [x["x"] for x in bo.res["all"]["params"]]
ys = bo.res["all"]["values"]
mean, sigma = bo.gp.predict(np.arange(len(f)).reshape(-1, 1), return_std=True)
plt.figure(figsize=(16, 9))
plt.plot(f)
plt.plot(np.arange(len(f)), mean)
plt.fill_between(np.arange(len(f)), mean+sigma, mean-sigma, alpha=0.1)
plt.scatter(bo.X.flatten(), bo.Y, c="red", s=50, zorder=10)
plt.xlim(0, len(f))
plt.ylim(f.min()-0.1*(f.max()-f.min()), f.max()+0.1*(f.max()-f.min()))
plt.savefig(figname)
return
#--------------------------------------------
# main code
if __name__ == "__main__":
VERBOSE = 0
# measure times
start_time = time.time()
#-----------------------------------
# load hyperparas
# use sklearn's default parameters for theta and random_start
gp_params = {"alpha": 1e-5, "n_restarts_optimizer": 2}
# Target function
np.random.seed(42)
xs = np.linspace(-2, 10, 10000)
f = np.exp(-(xs - 2)**2) + np.exp(-(xs - 6)**2/10) + 1/ (xs**2 + 1)
if VERBOSE>0:
plt.plot(f)
plt.show()
#-----------------------------------
# Acquisition function 1: Upper Confidence Bound
# Prefer exploitation (kappa=1.0)
bo = BayesianOptimization(f=lambda x: f[int(x)],
pbounds={"x": (0, len(f)-1)},
verbose=0)
bo.maximize(init_points=2, n_iter=25, acq="ucb", kappa=1, **gp_params)
plot_bo(f, bo, "ucb_exploitation.png")
# Prefer exploration (kappa=10)
bo = BayesianOptimization(f=lambda x: f[int(x)],
pbounds={"x": (0, len(f)-1)},
verbose=0)
bo.maximize(init_points=2, n_iter=25, acq="ucb", kappa=10, **gp_params)
plot_bo(f, bo, "ucb_exploration.png")
#-----------------------------------
# Acquisition function 2: Expected Improvement
# Prefer exploitation (xi=1e-4)
bo = BayesianOptimization(f=lambda x: f[int(x)],
pbounds={"x": (0, len(f)-1)},
verbose=0)
bo.maximize(init_points=2, n_iter=25, acq="ei", xi=1e-4, **gp_params)
plot_bo(f, bo, "ei_exploitation.png")
# Prefer exploration (xi=0.1)
bo = BayesianOptimization(f=lambda x: f[int(x)],
pbounds={"x": (0, len(f)-1)},
verbose=0)
bo.maximize(init_points=2, n_iter=25, acq="ei", xi=0.1, **gp_params)
plot_bo(f, bo, "ei_exploration.png")
#-----------------------------------
# Acquisition function 3: Probability of Improvement
# Prefer exploitation (xi=1e-4)
bo = BayesianOptimization(f=lambda x: f[int(x)], pbounds={"x": (0, len(f)-1)}, verbose=0)
bo.maximize(init_points=2, n_iter=25, acq="poi", xi=1e-4, **gp_params)
plot_bo(f, bo, "poi_exploitation.png")
# Prefer exploration (xi=0.1)
bo = BayesianOptimization(f=lambda x: f[int(x)], pbounds={"x": (0, len(f)-1)}, verbose=0)
bo.maximize(init_points=2, n_iter=25, acq="poi", xi=0.1, **gp_params)
plot_bo(f, bo, "poi_exploration.png")
#-----------------------------------
# measuring time
elapsed_time = time.time() - start_time
print ("Exiting Main Program, spending ", elapsed_time, "seconds.")
|
[
"z123a123s123@gmail.com"
] |
z123a123s123@gmail.com
|
c88a1af397f5418a03100cac9cde8e9e4629f207
|
34d1d64a049dd3a25293955f6312072f2fcb3905
|
/set-1/challenge2.py
|
f54288641f2df4a0648832da78827542e6a9bb54
|
[] |
no_license
|
alex-bellon/cryptopals
|
c82ec87377911e6cae365cb48b2058789b93b9a1
|
5bc6242a5b972866ba7eebe2f6efa80c7ebff71c
|
refs/heads/master
| 2020-05-03T18:40:02.320249
| 2019-08-16T21:15:27
| 2019-08-16T21:15:27
| 178,761,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
a = '1c0111001f010100061a024b53535009181c'
b = '686974207468652062756c6c277320657965'
aBin = bin(int(a, 16))[2:]
bBin = bin(int(b, 16))[2:]
c = int(aBin, 2) ^ int(bBin, 2)
print(hex(c))
|
[
"alexrbellon@gmail.com"
] |
alexrbellon@gmail.com
|
c3b224c2fb8cd240476e5ebc7795c22ed913304e
|
6a7e6a9a27b2141c7312b04b6cba3852af016c69
|
/Lauhdutin/@Resources/Frontend/GenericFilePathDialog.py
|
9411750cd205238ebe70047d24473a5aba624706
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
Tene21/Lauhdutin
|
35ed025f286503a3e861fc4c92415e84b1509ee2
|
998bfac4c02fc404614fb96c215bbe45bc8aca01
|
refs/heads/master
| 2021-01-20T22:02:30.201304
| 2017-05-29T08:09:15
| 2017-05-29T08:09:15
| 101,792,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
import sys, os, subprocess
try:
from tkinter import *
from tkinter import filedialog
RainmeterPath = os.path.join(sys.argv[1][:-1], "Rainmeter.exe")
FunctionName = sys.argv[2][:-1]
InitialDir = sys.argv[3][:-1]
Config = sys.argv[4][:-1]
root = Tk()
root.withdraw()
# askopenfilename returns the selected path as a string (askopenfile would return a file object)
path = filedialog.askopenfilename(initialdir=InitialDir)
subprocess.call(
[
RainmeterPath, "!CommandMeasure", "SettingsScript",
"%s('%s')" % (FunctionName, path), Config
],
shell=False)
except ImportError:
import traceback
traceback.print_exc()
input()
|
[
"noreply@github.com"
] |
noreply@github.com
|
9f4e62cb49368115d24ed01964de31c04727d60e
|
4ce1cecacda0da4f662f188c89e793a60c8c0439
|
/Door.py
|
be43c539033cd6d0c4f732b7321357ef4af02a9e
|
[] |
no_license
|
EuanOR/FYP
|
5419b1c8c18a0f24a1628e54c068aadf121ebe9e
|
91fb5803cad09d6eb7b2c1ed74b7fe45120248ea
|
refs/heads/master
| 2020-04-24T11:54:47.632710
| 2019-03-25T19:58:55
| 2019-03-25T19:58:55
| 171,941,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
class Door(object):
def __init__(self,thickness):
if 1.0 < thickness < 3.0:
self._thickness = thickness
else:
print("Door must be between 1 and 3 inches thick")
self._open = False
def get_thickness(self):
return self._thickness
def set_thickness(self, thickness):
if 1.0 < thickness < 3.0:
self._thickness = thickness
else:
print("Door must be between 1 and 3 inches thick")
def open_door(self):
self._open = True
def close_door(self):
self._open = False
def is_open(self):
return self._open
|
[
"115312821@umail.ucc.ie"
] |
115312821@umail.ucc.ie
|
6d61f171ddbc7385d9fec8b40e92e0a29e3dd8dd
|
916586620128e8c357b634192512b253bb4fc944
|
/00_mysite/mysite/settings.py
|
f9f391ba5dc44ba1d02b040704163d93f59a11dc
|
[] |
no_license
|
Kevinqian0501/Django_start
|
f11fdc9a2a548b7623ee29de32c8303d746bde30
|
315abaabb28fd4137b9e4f9bd32b44e6db410adc
|
refs/heads/master
| 2021-05-16T14:44:25.983886
| 2018-01-24T18:30:07
| 2018-01-24T18:30:07
| 118,492,770
| 0
| 0
| null | 2018-01-24T18:30:08
| 2018-01-22T17:46:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,150
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.dev20180121070910.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!u*lko(6$ux(ksrs&)!g6qr8fkx(%b9v1io09f%^1z4ywd!zly'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['209.126.122.45']
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
|
[
"kevin@kavout.co"
] |
kevin@kavout.co
|
3be5a911c554072c02c06f1a186d5799347d1876
|
8394c2b1bd17f04e5cb219c98e300d91530ba831
|
/project/utils/models/model_handling.py
|
ba756f593b55b1bc91b8ac5cecc3c61af35624f4
|
[] |
no_license
|
justinwhatley/interpretability_experiment
|
f26356ce16282a715ba951560c56a94823f733b6
|
fcfdd2441f47dab7f1b711f7fe18b49efbe6b791
|
refs/heads/master
| 2022-11-05T14:42:19.367835
| 2020-06-26T20:54:27
| 2020-06-26T20:54:27
| 259,716,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
import joblib
def load_or_store_model(func):
"""
Wrapper/decorator to check whether the model is already saved to return saved model instead of new training
Function must have a 'save_to' filepath and 'recompute' bool must be defined
"""
def loading_wrapper(*args, **kwargs):
recompute = kwargs['recompute']
save_to = kwargs['save_to']
if not recompute:
try:
print('Loading previously trained model: ' + str(save_to))
return joblib.load(save_to)
except:
print('Model not found: ' + str(save_to))
print('Training: ' + func.__module__)
model = func(*args, **kwargs)
return save_model(model, save_to)
def save_model(model, save_to):
print('Saving model to: ' + str(save_to))
joblib.dump(model, save_to)
return model
return loading_wrapper
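# Illustrative usage sketch (added; `train_classifier`, `X`, `y` and the .joblib path are placeholder
# names, not part of this module). The decorated function must be called with the `save_to` and
# `recompute` keyword arguments that loading_wrapper reads:
#
#   from sklearn.linear_model import LogisticRegression
#
#   @load_or_store_model
#   def train_classifier(X, y, save_to=None, recompute=False):
#       return LogisticRegression().fit(X, y)
#
#   model = train_classifier(X, y, save_to='models/logreg.joblib', recompute=False)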
|
[
"justinwhatley5@gmail.com"
] |
justinwhatley5@gmail.com
|
9bbe6ad656b19e2b6235563076647a80dba49d14
|
f6100704f93c448f357c4753aec50799c396d991
|
/操作db离线脚本.py
|
edc1a663af5ab4013a3c6b4b0fe174629bdb2c24
|
[] |
no_license
|
wssf812/Flask-basic-options
|
9c28aa12367b247c026a3f7643000354ea271613
|
340194a9e28adab92f135b410d17bb5e210bbfc1
|
refs/heads/master
| 2023-03-01T14:06:54.022222
| 2021-02-09T06:58:34
| 2021-02-09T06:58:34
| 337,299,254
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# -*- coding: utf-8 -*-
# *Time* : 2021/2/3 10:06
# *Author* : wssf
# *File* : 操作db离线脚本.py
# *Software*: PyCharm
"离线脚本,用来创建数据库,插入数据,可以在不启动flask程序的基础上"
from Flask_example import db
from Flask_example import create_app
from werkzeug.security import generate_password_hash  # import the password hashing utility
from Flask_example import models
app = create_app()
with app.app_context():
# db.create_all()  # create all tables from the model classes
user = models.Users(
username="liu",
password=generate_password_hash("123456")
)
# add the new record to the database session
db.session.add(user)
# commit the data
db.session.commit()
|
[
"1228589545@qq.com"
] |
1228589545@qq.com
|
8ebe3c061d8acbaf5cbbcdb7219aa906364cb940
|
3760f688b5f03b3334853500a960b3daf2666dd6
|
/todos/urls.py
|
9d26ca819a09a7a00ed16e791f66d6bc3b8f291f
|
[] |
no_license
|
Cody1009/django_todo_api
|
a1ece2cdf6f1ddd1299fb3d095859419329cbfd4
|
4057ccddb3211abb25e1f8ae3e572b2a6c72257c
|
refs/heads/master
| 2023-07-31T02:32:50.473349
| 2020-05-03T01:05:09
| 2020-05-03T01:05:09
| 260,803,895
| 0
| 0
| null | 2021-09-22T19:02:43
| 2020-05-03T01:02:10
|
Python
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.ListTodo.as_view()),
path('<int:pk>/', views.DetailTodo.as_view())
]
|
[
"nansiki02@gmail.com"
] |
nansiki02@gmail.com
|
26534e055871d229971a287afd01f30afec488e8
|
03d07de94fc22d1583c45ca84c711a06df8a40ff
|
/lc/dynamic_programming/lc_91_decode-ways.py
|
47e6fb60ea6793ea85275e7e4575d8b528ab5713
|
[] |
no_license
|
gaopenghigh/algorithm
|
94e04293c69a2ad6903495e1cf6e1b75556535bb
|
f5d78c98c7201c56f9d4c3a9c0c76e9447a17985
|
refs/heads/master
| 2022-03-11T18:46:38.712923
| 2022-02-20T14:20:54
| 2022-02-20T14:20:54
| 54,484,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
# 91. Decode Ways
# Difficulty: Medium
# A message containing letters A-Z is encoded into numbers using the following mapping:
# 'A' -> "1"
# 'B' -> "2"
# ...
# 'Z' -> "26"
# To decode an encoded message, all of the digits must be grouped and mapped back into letters,
# reversing the mapping above (there may be multiple ways). For example, "11106" can be mapped into:
# "AAJF", grouping the message as (1 1 10 6)
# "KJF", grouping the message as (11 10 6)
# Note that the message cannot be grouped as (1 11 06), because "06" cannot be mapped to "F":
# "6" and "06" are not equivalent under the mapping.
# Given a non-empty string s containing only digits, compute and return the total number of ways to decode it.
# The test data guarantees that the answer fits in a 32-bit integer.
#
# Example 1:
# Input: s = "12"
# Output: 2
# Explanation: it can be decoded as "AB" (1 2) or "L" (12).
#
# Example 2:
# Input: s = "226"
# Output: 3
# Explanation: it can be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2 2 6).
#
# Example 3:
# Input: s = "0"
# Output: 0
# Explanation: no character maps to a number starting with 0.
# The only valid mappings containing 0 are 'J' -> "10" and 'T' -> "20".
# Since there is no such character, there is no valid way to decode the string, because every digit must be mapped.
#
# Constraints:
# 1 <= s.length <= 100
# s contains only digits and may contain leading zeros.
# The first step of dynamic programming is to identify two things: the "state" and the "choices".
# The state is a description of a configuration; each state defines a subproblem, and the core of DP is decomposing the problem into subproblems.
# A choice is an action; through an action the problem can be broken down into subproblems.
# The DP framework looks like:
# for state1 in all values of state1:
#     for state2 in all values of state2:
#         for ...
#             dp[state1][state2][...] = best_of(choice1, choice2, ...)
#
# In this problem the "state" is the string still to be decoded.
# As for the choices: the last character of the string can either stand on its own, or be paired with the character before it.
# Let dp[i] = x mean that s[:i] can be decoded in x ways.
# For the last character s[i-1] of s[:i] there are the following cases:
# 1. s[i-1] stands on its own, provided 1 <= int(s[i-1]) <= 9; then dp[i] = dp[i-1]
# 2. s[i-1] is paired with s[i-2], provided s[i-2] != '0' and 1 <= int(s[i-2]) * 10 + int(s[i-1]) <= 26; then dp[i] = dp[i-2]
# The sum of the two is the final value of dp[i].
# Base case: dp[0] = 1, i.e. the empty string also counts as one way to decode.
# Also, since dp[i] only depends on dp[i-1] and dp[i-2], the dp array can be compressed down to just 3 variables.
class Solution:
def numDecodings(self, s: str) -> int:
dp = [0 for _ in range(len(s)+1)]
dp[0] = 1
for i in range(1, len(s)+1):
x = 0
if 1 <= int(s[i-1]) <= 9:
x = dp[i-1]
if i >= 2 and s[i-2] != '0' and 1 <= int(s[i-2])*10 + int(s[i-1]) <= 26:  # i >= 2 keeps s[i-2] from wrapping to the last character
x += dp[i-2]
dp[i] = x
return dp[len(s)]
if __name__ == '__main__':
s = '12'
print(Solution().numDecodings(s))
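# Illustrative O(1)-space variant (added sketch, not part of the original solution): since dp[i] only
# depends on dp[i-1] and dp[i-2], the dp array can be replaced by two rolling variables.
def num_decodings_constant_space(s: str) -> int:
    prev2, prev1 = 1, (1 if s[0] != '0' else 0)  # dp[0], dp[1]
    for i in range(2, len(s) + 1):
        cur = 0
        if 1 <= int(s[i-1]) <= 9:  # s[i-1] decoded on its own
            cur += prev1
        if s[i-2] != '0' and 1 <= int(s[i-2]) * 10 + int(s[i-1]) <= 26:  # paired with s[i-2]
            cur += prev2
        prev2, prev1 = prev1, cur
    return prev1
# print(num_decodings_constant_space('226'))  # expected: 3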
|
[
"jh.gao@ucloud.cn"
] |
jh.gao@ucloud.cn
|
835b080ae5e52498164715e7341be0d16a872109
|
a2f8b748a3427b8ffa622c96dc6a4f4339495672
|
/migrations/versions/12ae296935d5_.py
|
923e41d481cb910ac14eeab7c4f6ee0b1d665f64
|
[] |
no_license
|
quinnwu/pvapp
|
96242f6b6f1b1410fd4777579856d4ac8959dd47
|
db3c507b9d35fe468f5d358a41336fbfa26117e2
|
refs/heads/master
| 2021-03-27T14:54:11.295834
| 2018-04-28T18:09:35
| 2018-04-28T18:09:35
| 119,205,359
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
"""empty message
Revision ID: 12ae296935d5
Revises: 2af6e619b2f1
Create Date: 2016-01-03 19:20:57.386338
"""
# revision identifiers, used by Alembic.
revision = '12ae296935d5'
down_revision = '2af6e619b2f1'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('competitioncycle', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('project', 'competitioncycle')
### end Alembic commands ###
|
[
"wu.quinn@gmail.com"
] |
wu.quinn@gmail.com
|
7d121df9ea5860e1d137894783587cac87de54f9
|
0f4e610ca8a0be43674abe2c88c53af4eb5bd834
|
/codility/easy/1_MaxProductOfThree/dosun.py
|
d24e8b3bd3ae87e64c8b835f58e01391f70ffc5a
|
[] |
no_license
|
Jungeol/algorithm
|
6dde6f736159905dc3d7d88005f2b515dcd1b52d
|
459caa33681fe67801f0fac01f7de82456529ab1
|
refs/heads/master
| 2020-09-21T01:17:16.589098
| 2020-05-22T09:27:59
| 2020-05-22T09:27:59
| 224,638,291
| 2
| 0
| null | 2020-05-22T09:28:00
| 2019-11-28T11:27:35
|
Python
|
UTF-8
|
Python
| false
| false
| 365
|
py
|
"""https://app.codility.com/programmers/lessons/6-sorting/max_product_of_three/
Task Score :100%
Correctness : 100%
Performance : 100%
result: https://app.codility.com/demo/results/trainingBNAHGU-WCZ/
"""
def solution(A):
A.sort()
n = len(A)
product1 = A[0] * A[1] * A[n-1]
product2 = A[n-1] * A[n-2] * A[n-3]
return max(product1, product2)
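# Illustrative check (added, not part of the original solution): the two-smallest-times-largest case
# matters when the array contains large negative values, e.g.
#   solution([-10, -10, 1, 2, 3]) == 300   # (-10) * (-10) * 3 beats 1 * 2 * 3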
|
[
"noreply@github.com"
] |
noreply@github.com
|
1f18c643dafb612801fe04bca072bfe0dace75d7
|
4a7705fb9b16d03377600f49770ae31b2c7358a5
|
/day9/gpzdsy股票最大收益2.py
|
a0c58c7282884d90b4b718cebb850ea29e7e0aee
|
[] |
no_license
|
dsgdtc/everything_arithmetic
|
600e5c4f8e95331689b73b27ee01432f196457ae
|
4b2d490c03467b7fa6cba36f9e27cf60bfce396c
|
refs/heads/master
| 2020-03-08T13:43:16.537525
| 2018-04-05T14:17:48
| 2018-04-05T14:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,692
|
py
|
# -*- coding: utf-8 -*-
"""
给定数组A,其中A[i]表示某股票第i天的价格。
如果允许最多进行K次交易(K是已经给定的定值),
请计算何时买卖达到最大收益,返回最大收益值。
规定 不能嵌套买卖 Z只能是买卖-买卖-买卖......
eg
[7,1,5,3,6,4]最大收益值为5-1=4,6-3=3,4+3 = 7
算法:
dp[k][i] 表示最多k次交易在第i天的最大收益
在第i天,有两种选择,要么卖出股票,要么不卖出股票,从而得到最大收益
dp[k][i] = max { dp[k][i-1] 不卖出 }
{ dp[k-1][j] + prices[i] - prices[j] , j属于[0,i-1] }
"""
__author__ = 'guyu'
def max_profit(A, size, K):
    # dp[k][i]: the maximum profit on day i with at most K transactions
    # the +1 keeps the indices aligned with 1-based day numbers
    dp = [[0 for col in range(size+1)] for row in range(K+1)]
    price = A
    price.insert(0, None)  # pad the front so that days can be counted starting from day 1
    for k in range(1, K+1):
        for i in range(1, size+1):
            dp[k][i] = dp[k][i - 1]  # profit if the stock is not sold on day i
            for j in range(1, i+1):
                # print (dp[k][i-1])
                # print (dp[k-1][j]+(price[i] - price[j]))
                dp[k][i] = max(dp[k][i], dp[k-1][j]+(price[i] - price[j]) )
                # print ("dp[%s][%s] set to %s" % (k, i, dp[k][i]))
            # print ("What is dp:%s" %(dp))
            # input("etc...")
    # print (dp)
    # print (dp[K])
    return dp[K][size]  # maximum profit over all `size` days
if __name__ == "__main__":
A= [7,1,5,3,6,4]
size = len(A)
K = 3
result = max_profit(A, size, K)
print (result)
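# Illustrative O(K*n) refinement (added sketch, not part of the original solution): the inner loop over j
# can be dropped by tracking best = max over j of (dp[k-1][j] - price on day j) incrementally.
def max_profit_fast(prices, K):
    n = len(prices)
    if n == 0 or K == 0:
        return 0
    dp = [[0] * (n + 1) for _ in range(K + 1)]
    for k in range(1, K + 1):
        best = dp[k-1][1] - prices[0]  # best "buy position" seen so far for the k-th transaction
        for i in range(2, n + 1):
            dp[k][i] = max(dp[k][i-1], prices[i-1] + best)
            best = max(best, dp[k-1][i] - prices[i-1])
    return dp[K][n]
# print (max_profit_fast([7,1,5,3,6,4], 3))  # expected: 7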
|
[
"dsgdtc@163.com"
] |
dsgdtc@163.com
|
3fcfb778b0855ff4cb8210f9e3e4818cf4cd7f03
|
c5b5a2375f83fa61a734aa4a87732d092108b1b8
|
/GaulToMosaic.py
|
a434e4ba5b59ff5fdceffe5573615da14d771271
|
[] |
no_license
|
Obywatelecki/ArcPy_scripts
|
3a0225834ee6df9f3b2746a86f6fe68277933cc8
|
81d6432f8cfcd866c078e7f0e0541efb13bb04d6
|
refs/heads/master
| 2021-01-24T20:48:02.941389
| 2018-07-24T19:51:19
| 2018-07-24T19:51:19
| 123,260,446
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
import time
print "Importing Arcpy...." + str(time.ctime())
import arcpy
print " Arcpy imported! " + str(time.ctime())
print "Setting local variables" + str(time.ctime())
arcpy.env.workspace = "D:/GD/IHPAN/Gaul/_Mapy/_metaarkusze/data.gdb"
# mxd = arcpy.mapping.MapDocument("D:/GD/WGiSR/_Konferencje/Plener 2018/heatMap/HeatMap.mxd")
# df = arcpy.mapping.ListDataFrames(mxd)[0]
print " Local variables set!" + str(time.ctime())
print "Clipping..." + str(time.ctime())
arcpy.Clip_management(
r"GAUL_RASTER\Babimost_A2_B2_meta.tif",
"265690.022579334 444111.323305845 333117.820225502 527358.613670745",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Babimost_clip",
r"GAUL_MASKS\POWIAT_Babimost",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Poznan_A1-B2_meta.tif",
"299400.899102051 470779.676501803 382321.502278291 540453.896805332",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Poznan_clip",
r"GAUL_MASKS\POWIAT_Poznań",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Srem_A2-B2_meta.tif",
"335720.040082338 441921.717819948 400351.860474886 515204.67834739",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Srem_clip",
r"GAUL_MASKS\POWIAT_Śrem",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Miedzyrzecz_A2-B2_meta.tif",
"231042.34059775 485283.89837235 332281.278737942 559072.743229139",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Miedzyrzecz_clip",
r"GAUL_MASKS\POWIAT_Międzyrzecz",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Wschowa_A2-B2_meta.tif",
"277331.797332692 411648.690308725 359810.429110255 482980.143615188",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Wschowa_clip",
r"GAUL_MASKS\POWIAT_Wschowa",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Krobia_A1_meta.tif",
"325559.668889663 387037.86742851 395016.309742185 470321.802898691",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Krobia_clip",
r"GAUL_MASKS\POWIAT_Krobia",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Oborniki_A1-B2_meta.tif",
"289538.110717687 498943.938028237 379936.142480935 573069.735483128",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Oborniki_clip",
r"GAUL_MASKS\POWIAT_Oborniki",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Koscian_A2-B2_meta.tif",
"302944.357398094 432303.434413203 369814.26984427 507153.17713879",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Koscian_clip",
r"GAUL_MASKS\POWIAT_Kościan",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
print " Clipped!" + str(time.ctime())
print "Mosaicking rasters...." + str(time.ctime())
arcpy.MosaicToNewRaster_management(
"Babimost_clip; Koscian_clip; Oborniki_clip; Krobia_Clip; Wschowa_clip; Miedzyrzecz_clip; Srem_clip; Poznan_clip",
r"D:/GD/IHPAN/Gaul/_Mapy/_metaarkusze/data.gdb",
"GAUL_mosaicked",
"",
"8_BIT_UNSIGNED",
"",
3,
"FIRST",
"FIRST"
)
print " Rasters mosaicked!" + str(time.ctime())
|
[
"tpanecki@gmail.com"
] |
tpanecki@gmail.com
|
0876651216fe8d66b6ac1486bdb463a7eb6bcf0b
|
b37b62a73a14ed3904ffed1db99dafe01bc9eca3
|
/app/list/models.py
|
3c3e2f812571158f337b54618fddebb78ef4c17e
|
[] |
no_license
|
gambler1541/django-pagination
|
d340d7ce3186f801ce1cf4aadb59ee77bd52e9d6
|
44c32be793c0bd2332f29ba5422205ccf0c2d2b8
|
refs/heads/master
| 2020-04-16T22:56:16.565405
| 2019-01-16T06:59:51
| 2019-01-16T06:59:51
| 165,990,830
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django.db import models
from django.views.generic import ListView
class Constacts(models.Model):
text = models.TextField(default='')
|
[
"gambler1541@gmail.com"
] |
gambler1541@gmail.com
|
b11b2e7f23d825eb1fda17d1546294cfbf352e88
|
515870d521b3b3f8f8f4b2aebee593670b02e708
|
/src/Gon/realtime_starter_redis_queue.py
|
584c2277a120b800a36d7b503279b6c1219ba035
|
[
"MIT"
] |
permissive
|
jsyzc2019/Listed-company-news-crawl-and-text-analysis
|
2d806e8b3dfb2df97cd70908a365efc3e6b9ca1e
|
a5fb02dbfe2869b4016da06a3a15dd16171b6031
|
refs/heads/master
| 2023-07-07T19:12:46.259018
| 2023-01-13T16:03:48
| 2023-01-13T16:03:48
| 260,937,347
| 0
| 0
|
MIT
| 2020-05-03T14:09:11
| 2020-05-03T14:09:11
| null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
import __init__
import redis
from Kite import config
from Killua.buildstocknewsdb import GenStockNewsDB
redis_client = redis.StrictRedis(config.REDIS_IP,
port=config.REDIS_PORT,
db=config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_DB_ID)
redis_client.lpush(config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_VAR, "realtime_starter_redis_queue.py")
gen_stock_news_db = GenStockNewsDB()
gen_stock_news_db.listen_redis_queue()
|
[
"bingzhenli@hotmail.com"
] |
bingzhenli@hotmail.com
|
b57deb3a8dace434bd99d855347a2ca3f1cf04e0
|
f714430490229ce0e8d5e160fdb3bfbc041173e3
|
/migrations/versions/51f1ee7915bf_migrate.py
|
9b239b5d4641e189342b12852593130894f562c4
|
[] |
no_license
|
HEW2meiG/HEW2
|
717fa1fae135b20617c53727005c6940b401b0f8
|
f8626b8edd2d4b0f8fc915acd45062a02399ef40
|
refs/heads/master
| 2023-03-14T13:54:22.187884
| 2021-03-12T16:50:39
| 2021-03-12T16:50:39
| 285,750,649
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,528
|
py
|
"""migrate
Revision ID: 51f1ee7915bf
Revises:
Create Date: 2021-02-04 00:17:37.826629
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '51f1ee7915bf'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('BuyCredit',
sa.Column('BuyCredit_id', sa.Integer(), nullable=False),
sa.Column('credit_name', sa.String(length=255), nullable=False),
sa.Column('credit_num', sa.Integer(), nullable=False),
sa.Column('expire', sa.Date(), nullable=False),
sa.Column('security_code_hash', sa.String(length=255), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.PrimaryKeyConstraint('BuyCredit_id')
)
op.create_table('BuyShippingAddress',
sa.Column('BuyShippingAddress_id', sa.Integer(), nullable=False),
sa.Column('last_name', sa.String(length=255), nullable=False),
sa.Column('first_name', sa.String(length=255), nullable=False),
sa.Column('last_name_kana', sa.String(length=255), nullable=False),
sa.Column('first_name_kana', sa.String(length=255), nullable=False),
sa.Column('zip_code', sa.Integer(), nullable=False),
sa.Column('prefecture', sa.String(length=64), nullable=False),
sa.Column('address1', sa.String(length=255), nullable=False),
sa.Column('address2', sa.String(length=255), nullable=False),
sa.Column('address3', sa.String(length=255), nullable=True),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.PrimaryKeyConstraint('BuyShippingAddress_id')
)
op.create_table('Credit',
sa.Column('Credit_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('credit_name', sa.String(length=255), nullable=False),
sa.Column('credit_num', sa.Integer(), nullable=False),
sa.Column('expire', sa.Date(), nullable=False),
sa.Column('security_code_hash', sa.String(length=255), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('Credit_id')
)
op.create_table('ShippingAddress',
sa.Column('ShippingAddress_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('last_name', sa.String(length=255), nullable=False),
sa.Column('first_name', sa.String(length=255), nullable=False),
sa.Column('last_name_kana', sa.String(length=255), nullable=False),
sa.Column('first_name_kana', sa.String(length=255), nullable=False),
sa.Column('zip_code', sa.Integer(), nullable=False),
sa.Column('prefecture', sa.String(length=64), nullable=False),
sa.Column('address1', sa.String(length=255), nullable=False),
sa.Column('address2', sa.String(length=255), nullable=False),
sa.Column('address3', sa.String(length=255), nullable=True),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('ShippingAddress_id')
)
op.create_table('User',
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('user_code', sa.String(length=64), nullable=False),
sa.Column('username', sa.String(length=64), nullable=False),
sa.Column('email', sa.String(length=64), nullable=False),
sa.Column('password_hash', sa.String(length=128), nullable=False),
sa.Column('picture_path', sa.Text(), nullable=False),
sa.Column('prof_comment', sa.Text(), nullable=True),
sa.Column('default_ShippingAddress_id', sa.Integer(), nullable=True),
sa.Column('default_pay_way', sa.Integer(), nullable=False),
sa.Column('default_Credit_id', sa.Integer(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['default_Credit_id'], ['Credit.Credit_id'], ),
sa.ForeignKeyConstraint(['default_ShippingAddress_id'], ['ShippingAddress.ShippingAddress_id'], ),
sa.PrimaryKeyConstraint('User_id')
)
op.create_index(op.f('ix_User_email'), 'User', ['email'], unique=True)
op.create_index(op.f('ix_User_user_code'), 'User', ['user_code'], unique=True)
op.create_index(op.f('ix_User_username'), 'User', ['username'], unique=False)
op.create_table('UserTempToken',
sa.Column('UserTempTokenToken_id', sa.Integer(), nullable=False),
sa.Column('token', sa.String(length=64), nullable=False),
sa.Column('email', sa.String(length=64), nullable=False),
sa.Column('expire_at', sa.DateTime(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.PrimaryKeyConstraint('UserTempTokenToken_id'),
sa.UniqueConstraint('email')
)
op.create_index(op.f('ix_UserTempToken_token'), 'UserTempToken', ['token'], unique=True)
op.create_table('Address',
sa.Column('Address_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('zip_code', sa.Integer(), nullable=False),
sa.Column('prefecture', sa.String(length=64), nullable=False),
sa.Column('address1', sa.String(length=255), nullable=False),
sa.Column('address2', sa.String(length=255), nullable=False),
sa.Column('address3', sa.String(length=255), nullable=True),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('Address_id')
)
op.create_table('MailResetToken',
sa.Column('MailResetToken_id', sa.Integer(), nullable=False),
sa.Column('token', sa.String(length=64), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=False),
sa.Column('expire_at', sa.DateTime(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('MailResetToken_id'),
sa.UniqueConstraint('email')
)
op.create_index(op.f('ix_MailResetToken_token'), 'MailResetToken', ['token'], unique=True)
op.create_table('PasswordResetToken',
sa.Column('PasswordResetToken_id', sa.Integer(), nullable=False),
sa.Column('token', sa.String(length=64), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('expire_at', sa.DateTime(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('PasswordResetToken_id')
)
op.create_index(op.f('ix_PasswordResetToken_token'), 'PasswordResetToken', ['token'], unique=True)
op.create_table('Sell',
sa.Column('Sell_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('sell_title', sa.String(length=255), nullable=False),
sa.Column('key1', sa.String(length=255), nullable=False),
sa.Column('key2', sa.String(length=255), nullable=False),
sa.Column('key3', sa.String(length=255), nullable=False),
sa.Column('sell_comment', sa.Text(), nullable=False),
sa.Column('price', sa.Integer(), nullable=False),
sa.Column('item_picture_path', sa.Text(), nullable=False),
sa.Column('genre', sa.Integer(), nullable=False),
sa.Column('item_state', sa.Integer(), nullable=False),
sa.Column('postage', sa.Integer(), nullable=False),
sa.Column('send_way', sa.Integer(), nullable=False),
sa.Column('consignor', sa.String(length=64), nullable=False),
sa.Column('schedule', sa.Integer(), nullable=False),
sa.Column('remarks', sa.Text(), nullable=True),
sa.Column('deal_status', sa.Integer(), nullable=False),
sa.Column('sell_flg', sa.Boolean(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('has_sent', sa.Boolean(), nullable=False),
sa.Column('has_got', sa.Boolean(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('Sell_id')
)
op.create_table('UserConnect',
sa.Column('UserConnect_id', sa.Integer(), nullable=False),
sa.Column('to_user_id', sa.Integer(), nullable=False),
sa.Column('from_user_id', sa.Integer(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
sa.ForeignKeyConstraint(['to_user_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('UserConnect_id')
)
op.create_table('UserInfo',
sa.Column('UserInfo_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('last_name', sa.String(length=255), nullable=False),
sa.Column('first_name', sa.String(length=255), nullable=False),
sa.Column('last_name_kana', sa.String(length=255), nullable=False),
sa.Column('first_name_kana', sa.String(length=255), nullable=False),
sa.Column('birth', sa.Date(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('UserInfo_id')
)
op.create_table('BrowsingHistory',
sa.Column('BrowsingHistory_id', sa.Integer(), nullable=False),
sa.Column('Sell_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('BrowsingHistory_id')
)
op.create_table('Buy',
sa.Column('Buy_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('Sell_id', sa.Integer(), nullable=False),
sa.Column('pay_way', sa.Integer(), nullable=False),
sa.Column('Credit_id', sa.Integer(), nullable=False),
sa.Column('ShippingAddress_id', sa.Integer(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['Credit_id'], ['BuyCredit.BuyCredit_id'], ),
sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
sa.ForeignKeyConstraint(['ShippingAddress_id'], ['BuyShippingAddress.BuyShippingAddress_id'], ),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('Buy_id')
)
op.create_table('DealMessage',
sa.Column('DealMessage_id', sa.Integer(), nullable=False),
sa.Column('Sell_id', sa.Integer(), nullable=False),
sa.Column('to_user_id', sa.Integer(), nullable=False),
sa.Column('from_user_id', sa.Integer(), nullable=False),
sa.Column('message', sa.Text(), nullable=False),
sa.Column('is_read', sa.Boolean(), nullable=False),
sa.Column('is_checked', sa.Boolean(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
sa.ForeignKeyConstraint(['to_user_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('DealMessage_id')
)
op.create_table('Likes',
sa.Column('Sell_id', sa.Integer(), nullable=False),
sa.Column('User_id', sa.Integer(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('Sell_id', 'User_id')
)
op.create_table('PostMessage',
sa.Column('PostMessage_id', sa.Integer(), nullable=False),
sa.Column('Sell_id', sa.Integer(), nullable=False),
sa.Column('from_user_id', sa.Integer(), nullable=False),
sa.Column('message', sa.Text(), nullable=False),
sa.Column('is_read', sa.Boolean(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('PostMessage_id')
)
op.create_table('Rating',
sa.Column('Rating_id', sa.Integer(), nullable=False),
sa.Column('Sell_id', sa.Integer(), nullable=False),
sa.Column('to_user_id', sa.Integer(), nullable=False),
sa.Column('from_user_id', sa.Integer(), nullable=False),
sa.Column('rating', sa.Integer(), nullable=False),
sa.Column('rating_message', sa.Text(), nullable=True),
sa.Column('create_at', sa.DateTime(), nullable=False),
sa.Column('update_at', sa.DateTime(), nullable=False),
sa.CheckConstraint('update_at >= create_at'),
sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
sa.ForeignKeyConstraint(['to_user_id'], ['User.User_id'], ),
sa.PrimaryKeyConstraint('Rating_id')
)
op.drop_table('sessions')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('sessions',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('session_id', sa.VARCHAR(length=255), nullable=True),
sa.Column('data', sa.TEXT(), nullable=True),
sa.Column('expiry', sa.DATETIME(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('session_id')
)
op.drop_table('Rating')
op.drop_table('PostMessage')
op.drop_table('Likes')
op.drop_table('DealMessage')
op.drop_table('Buy')
op.drop_table('BrowsingHistory')
op.drop_table('UserInfo')
op.drop_table('UserConnect')
op.drop_table('Sell')
op.drop_index(op.f('ix_PasswordResetToken_token'), table_name='PasswordResetToken')
op.drop_table('PasswordResetToken')
op.drop_index(op.f('ix_MailResetToken_token'), table_name='MailResetToken')
op.drop_table('MailResetToken')
op.drop_table('Address')
op.drop_index(op.f('ix_UserTempToken_token'), table_name='UserTempToken')
op.drop_table('UserTempToken')
op.drop_index(op.f('ix_User_username'), table_name='User')
op.drop_index(op.f('ix_User_user_code'), table_name='User')
op.drop_index(op.f('ix_User_email'), table_name='User')
op.drop_table('User')
op.drop_table('ShippingAddress')
op.drop_table('Credit')
op.drop_table('BuyShippingAddress')
op.drop_table('BuyCredit')
# ### end Alembic commands ###
|
[
"mei.shimomura@icloud.com"
] |
mei.shimomura@icloud.com
|
9a518550ecc9610bfeed5e94cc14082c1480cbad
|
526176649fc3d37c87c06626a2e8fcb1cc840bf0
|
/sqlite_db/db6.py
|
8717163a9439512d44881c22e9bb759d7bff7640
|
[] |
no_license
|
rames4498/Bootcamps_and_workshops
|
cd193bb302f4b2ed9037750b07e35f6875415476
|
402ef143be7a52ae71e08cdf8b7f0ff35d502455
|
refs/heads/master
| 2022-09-22T04:49:10.657585
| 2022-09-13T07:06:36
| 2022-09-13T07:06:36
| 239,116,561
| 9
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
import sqlite3
conn = sqlite3.connect('my_data.sqlite')
cursor = conn.cursor()
print("Opened database successfully")
cursor.execute('''CREATE TABLE SCHOOL
(ID INT PRIMARY KEY NOT NULL,
NAME TEXT NOT NULL,
AGE INT NOT NULL,
ADDRESS CHAR(50),
MARKS INT);''')
cursor.close()
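# A minimal, hypothetical usage sketch for the SCHOOL table created above: the row
# values are made up, and the commit/close calls show the cleanup the script omits.
cursor = conn.cursor()
cursor.execute("INSERT INTO SCHOOL (ID, NAME, AGE, ADDRESS, MARKS) VALUES (?, ?, ?, ?, ?)",
               (1, 'Alice', 21, 'Bangalore', 85))
conn.commit()
for row in cursor.execute("SELECT ID, NAME, MARKS FROM SCHOOL"):
    print(row)
cursor.close()
conn.close()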
|
[
"noreply@github.com"
] |
noreply@github.com
|
e990c045732a8d86cc9581ce152319b250823e60
|
a6efd75e038b6d2c28fc74a34ad9454c4f70da0c
|
/resnet_3d/train_TReNDs.py
|
f554f9b8ef6ba49f0963ed509a2f7df82146a7b8
|
[] |
no_license
|
qkqkfldis1/TRENDS_kaggle
|
a5886fde100364acef50763d621a7067893326d7
|
3d0c60a42afb654dbfcbdfe69b113a636d8bb00d
|
refs/heads/main
| 2023-04-18T10:47:21.418371
| 2021-05-02T20:02:12
| 2021-05-02T20:02:12
| 363,738,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,820
|
py
|
'''
Written by SeuTao
'''
import os
import time
import numpy as np
import torch
from setting import parse_opts
from torch.utils.data import DataLoader
from datasets.TReNDs import TReNDsDataset
from model import generate_model
from tqdm import tqdm
import random
#from apex import amp, optimizers
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="4"
#device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
seed = 42
print(f'setting everything to seed {seed}')
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def metric(y_true, y_pred):
return np.mean(np.sum(np.abs(y_true - y_pred), axis=0) / np.sum(y_true, axis=0))
def weighted_nae(inp, targ):
W = torch.FloatTensor([0.3, 0.175, 0.175, 0.175, 0.175])
return torch.mean(torch.matmul(torch.abs(inp - targ), W.to(device) / torch.mean(targ, axis=0)))
def valid(data_loader, model, sets):
# settings
print("validation")
model.eval()
y_pred = []
y_true = []
loss_ave = []
with torch.no_grad():
for batch_data in tqdm(data_loader):
# getting data batch
volumes, feats, fncs, degs, label = batch_data
if not sets.no_cuda:
volumes = volumes.to(device)
feats = feats.to(device)
fncs = fncs.to(device)
degs = degs.to(device)
label = label.to(device)
logits = model(volumes, feats, fncs, degs)
# calculating loss
loss_value = weighted_nae(logits, label)
y_pred.append(logits.data.cpu().numpy())
y_true.append(label.data.cpu().numpy())
loss_ave.append(loss_value.data.cpu().numpy())
print('valid loss', np.mean(loss_ave))
y_pred = np.concatenate(y_pred,axis=0)
y_true = np.concatenate(y_true,axis=0)
domain = ['age', 'domain1_var1', 'domain1_var2', 'domain2_var1', 'domain2_var2']
w = [0.3, 0.175, 0.175, 0.175, 0.175]
m_all = 0
for i in range(5):
m = metric(y_true[:,i], y_pred[:,i])
print(domain[i],'metric:', m)
m_all += m*w[i]
print('all_metric:', m_all)
model.train()
return np.mean(loss_ave)
def test(data_loader, model, sets, save_path):
# settings
print("validation")
model.eval()
y_pred = []
ids_all = []
with torch.no_grad():
for batch_data in tqdm(data_loader):
# getting data batch
ids, volumes, feats, fncs, degs = batch_data
if not sets.no_cuda:
volumes = volumes.to(device)
feats = feats.to(device)
                fncs = fncs.to(device)
degs = degs.to(device)
logits = model(volumes, feats, fncs, degs)
y_pred.append(logits.data.cpu().numpy())
ids_all += ids
y_pred = np.concatenate(y_pred, axis=0)
np.savez_compressed(save_path,
y_pred = y_pred,
ids = ids_all)
print(y_pred.shape)
def train(train_loader,valid_loader, model, optimizer, total_epochs, save_interval, save_folder, sets):
f = open(os.path.join(save_folder,'log.txt'),'w')
# settings
batches_per_epoch = len(train_loader)
print("Current setting is:")
print(sets)
print("\n\n")
model.train()
train_time_sp = time.time()
valid_loss = 99999
min_loss = 99999
for epoch in range(total_epochs):
rate = adjust_learning_rate(optimizer, epoch)
# Training
# log.info('lr = {}'.format(scheduler.get_lr()))
tk0 = tqdm(train_loader, total=int(len(train_loader)))
for batch_id, batch_data in enumerate(tk0):
# getting data batch
batch_id_sp = epoch * batches_per_epoch
volumes, feats, fncs, degs, label = batch_data
if not sets.no_cuda:
volumes = volumes.to(device)
feats = feats.to(device)
fncs = fncs.to(device)
degs = degs.to(device)
label = label.to(device)
optimizer.zero_grad()
logits = model(volumes, feats, fncs, degs)
# calculating loss
loss = weighted_nae(logits, label)
#with amp.scale_loss(loss, optimizer) as scaled_loss:
# scaled_loss.backward()
loss.backward()
optimizer.step()
avg_batch_time = (time.time() - train_time_sp) / (1 + batch_id_sp)
log_ = '{} Batch: {}-{} ({}), ' \
'lr = {:.5f}, ' \
'train loss = {:.3f}, ' \
'valid loss = {:.3f}, ' \
'avg_batch_time = {:.3f} '.format(sets.model_name, epoch, batch_id, batch_id_sp, rate, loss.item(), valid_loss, avg_batch_time)
#print(log_)
f.write(log_ + '\n')
f.flush()
# valid
valid_loss = valid(valid_loader,model,sets)
if valid_loss < min_loss:
min_loss = valid_loss
model_save_path = '{}/epoch_{}_batch_{}_loss_{}.pth.tar'.format(save_folder, epoch, batch_id, valid_loss)
model_save_dir = os.path.dirname(model_save_path)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
log_ = 'Save checkpoints: epoch = {}, batch_id = {}'.format(epoch, batch_id)
print(log_)
f.write(log_ + '\n')
torch.save({'epoch': epoch,
'batch_id': batch_id,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict()},
model_save_path)
print('Finished training')
f.close()
import torch
import torch.nn as nn
import torch.nn.functional as F
class MishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x * torch.tanh(F.softplus(x)) # x * tanh(ln(1 + exp(x)))
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
sigmoid = torch.sigmoid(x)
tanh_sp = torch.tanh(F.softplus(x))
return grad_output * (tanh_sp + x * sigmoid * (1 - tanh_sp * tanh_sp))
class Mish(nn.Module):
def forward(self, x):
return MishFunction.apply(x)
def to_Mish(model):
for child_name, child in model.named_children():
if isinstance(child, nn.ReLU):
setattr(model, child_name, Mish())
else:
to_Mish(child)
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = 3e-4 * (0.9 ** epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
if __name__ == '__main__':
sets = parse_opts()
sets.no_cuda = False
sets.resume_path = None
sets.pretrain_path = None
sets.model_name = r'prue_3dconv'
sets.save_folder = r'./TReNDs/{}/' \
r'models_{}_{}_{}_fold_{}'.format(sets.model_name, 'resnet',sets.model_depth,sets.resnet_shortcut,sets.fold_index)
if not os.path.exists(sets.save_folder):
os.makedirs(sets.save_folder)
# getting model
torch.manual_seed(sets.manual_seed)
model, parameters = generate_model(sets)
model = model.to(device)
to_Mish(model)
print(model)
print(device)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=3e-4,
betas=(0.9, 0.999),
eps=1e-08)
#model, optimizer = amp.initialize(model, optimizer,
# opt_level='O1',
# verbosity=0
# )
model = torch.nn.DataParallel(model).to(device)
# train from resume
if sets.resume_path:
if os.path.isfile(sets.resume_path):
print("=> loading checkpoint '{}'".format(sets.resume_path))
checkpoint = torch.load(sets.resume_path)
model.load_state_dict(checkpoint['state_dict'])
# getting data
sets.phase = 'train'
if sets.no_cuda:
sets.pin_memory = False
else:
sets.pin_memory = True
train_dataset = TReNDsDataset(mode='train', fold_index=sets.fold_index)
train_loader = DataLoader(train_dataset, batch_size=sets.batch_size,
shuffle=True, num_workers=sets.num_workers,drop_last=True)
valid_dataset = TReNDsDataset(mode='valid', fold_index=sets.fold_index)
valid_loader = DataLoader(valid_dataset, batch_size=sets.batch_size,
shuffle=False, num_workers=sets.num_workers, drop_last=False)
# # training
train(train_loader, valid_loader,model, optimizer,
total_epochs=sets.n_epochs,
save_interval=sets.save_intervals,
save_folder=sets.save_folder, sets=sets)
# # validate
#valid(valid_loader, model, sets)
# test_dataset = TReNDsDataset(mode='test', fold_index=sets.fold_index)
# test_loader = DataLoader(test_dataset, batch_size=sets.batch_size,
# shuffle=False, num_workers=sets.num_workers,
# pin_memory=sets.pin_memory, drop_last=False)
# test(test_loader, model, sets, sets.resume_path.replace('.pth.tar','.npz'))
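# A small, optional self-check sketch (never called automatically): it feeds random,
# strictly positive targets through metric() and weighted_nae() above so the numpy
# and torch implementations can be compared by hand.
def _sanity_check_metric(n=8):
    y_true = np.random.rand(n, 5) + 0.1
    y_pred = np.random.rand(n, 5) + 0.1
    print('metric:', metric(y_true, y_pred))
    print('weighted_nae:', float(weighted_nae(torch.from_numpy(y_pred).float().to(device),
                                              torch.from_numpy(y_true).float().to(device))))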
|
[
"bshz15@gmail.com"
] |
bshz15@gmail.com
|
1d14e48594666b2b66dcfaef63ff96ea4d743632
|
e9266d5632d8d0da25d95dc0fd912379335328e0
|
/src/plants/schemas.py
|
d54a2c19e2c5665ffbaff4e9e9c05879b3e997be
|
[] |
no_license
|
capmayer/plantas-indicadoras
|
5921df6d634afc3df2b4e94db2b95418d3787fdc
|
174a8288ff2fdd6a259a669a7ec776d3721f239b
|
refs/heads/main
| 2023-06-22T09:51:10.877814
| 2021-07-29T00:05:39
| 2021-07-29T00:05:39
| 389,454,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
from pydantic import BaseModel
class PlantBase(BaseModel):
scientific_name: str
popular_names: str
description: str
indicates: str
class PlantList(PlantBase):
scientific_name_slug: str
class Plant(PlantList):
id: int
class Config:
orm_mode = True
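# A minimal, hypothetical usage sketch; the plant values below are invented and only
# show how the schema is constructed and serialized with pydantic v1.
if __name__ == "__main__":
    example = Plant(
        id=1,
        scientific_name="Taraxacum officinale",
        scientific_name_slug="taraxacum-officinale",
        popular_names="dandelion",
        description="Example description",
        indicates="Example soil indication",
    )
    print(example.dict())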
|
[
"henriqmayer@gmail.com"
] |
henriqmayer@gmail.com
|
8083d6ab3311a0ec517636a91fd33a22445421bd
|
7fa15c4dbca224aed616e76074bf017699af00df
|
/examples/sum_client.py
|
0011bc63474cfec50e1d633ae091f99a0ddb1f0e
|
[
"Apache-2.0"
] |
permissive
|
studio-ousia/mprpc
|
cc272e650b46a21997c680cf00e5ccbc015dc709
|
6076f68a16f78e0010307344afa253e0956f2a9d
|
refs/heads/master
| 2023-01-14T02:33:22.171728
| 2022-12-27T07:13:23
| 2022-12-27T07:13:23
| 13,551,567
| 170
| 60
|
NOASSERTION
| 2023-02-18T15:15:10
| 2013-10-14T03:15:41
|
Cython
|
UTF-8
|
Python
| false
| false
| 595
|
py
|
# -*- coding: utf-8 -*-
import gsocketpool.pool
import gevent.pool
from mprpc import RPCClient, RPCPoolClient
def call():
client = RPCClient('127.0.0.1', 6000)
print client.call('sum', 1, 2)
def call_using_pool():
options = dict(host='127.0.0.1', port=6000)
client_pool = gsocketpool.pool.Pool(RPCPoolClient, options)
def _call(n):
with client_pool.connection() as client:
return client.call('sum', 1, 2)
glet_pool = gevent.pool.Pool(10)
print [result for result in glet_pool.imap_unordered(_call, xrange(10))]
call()
call_using_pool()
|
[
"ikuya@ikuya.net"
] |
ikuya@ikuya.net
|
25ed4fc80f15bd27a6243626cc74db6d6f20abe2
|
8bb3bcf914860c20fb4a7163a8e0691cd802dd65
|
/ve/unit/test_list_object.py
|
df090cc057e76b5308629ac65f3383056bb0ac50
|
[
"Apache-2.0"
] |
permissive
|
nitinm694/pyvsc
|
8586cc2497f336289fecbfeb9e6dd788f4070b60
|
612de9e6244c685a3df1972e4860abfe35b614e1
|
refs/heads/master
| 2023-07-28T01:49:10.917496
| 2021-09-12T19:06:00
| 2021-09-12T19:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,362
|
py
|
'''
Created on Jun 20, 2020
@author: ballance
'''
import vsc
from vsc_test_case import VscTestCase
from vsc.visitors.model_pretty_printer import ModelPrettyPrinter
class TestListObject(VscTestCase):
def test_smoke(self):
@vsc.randobj
class item_c(object):
def __init__(self):
self.a = vsc.rand_uint8_t()
self.b = vsc.rand_uint8_t()
@vsc.randobj
class container_c(object):
def __init__(self):
self.l = vsc.rand_list_t(item_c())
for i in range(10):
self.l.append(item_c())
c = container_c()
c.randomize()
for i,it in enumerate(c.l):
print("Item[" + str(i) + "] a=" + str(it.a) + " b=" + str(it.b))
def test_constraints(self):
@vsc.randobj
class item_c(object):
def __init__(self):
self.a = vsc.rand_uint8_t()
self.b = vsc.rand_uint8_t()
@vsc.randobj
class container_c(object):
def __init__(self):
self.l = vsc.rand_list_t(item_c())
for i in range(10):
self.l.append(item_c())
@vsc.constraint
def all_eq_c(self):
with vsc.foreach(self.l) as it:
it.a == it.b
c = container_c()
for i in range(100):
c.randomize()
for it in c.l:
self.assertEqual(it.a, it.b)
def test_init_array_block(self):
@vsc.randobj
class item_c(object):
def __init__(self):
self.a = vsc.rand_uint8_t()
self.b = vsc.rand_uint8_t()
@vsc.randobj
class container_c(object):
def __init__(self):
self.l = vsc.rand_list_t(item_c())
for i in range(10):
self.l.append(item_c())
@vsc.constraint
def all_eq_c(self):
with vsc.foreach(self.l, it=True,idx=True) as (idx,it):
with vsc.if_then((idx&1) == 0):
it.a < it.b
with vsc.else_then:
it.a > it.b
c = container_c()
for i in range(100):
c.randomize()
self.assertEqual(10, len(c.l))
for i,it in enumerate(c.l):
if (i%2) == 0:
self.assertLess(it.a, it.b)
else:
self.assertGreater(it.a, it.b)
def test_diff_classes(self):
@vsc.randobj
class item_c(object):
def __init__(self):
self.a = vsc.rand_uint8_t()
self.b = vsc.rand_uint8_t()
@vsc.randobj
class item_c_1(item_c):
def __init__(self):
super().__init__()
@vsc.constraint
def a_lt_b_c(self):
self.a < self.b
@vsc.randobj
class item_c_2(item_c):
def __init__(self):
super().__init__()
@vsc.constraint
def a_gt_b_c(self):
self.a > self.b
@vsc.randobj
class container_c(object):
def __init__(self):
self.l = vsc.rand_list_t(item_c())
for i in range(10):
if i%2 == 0:
self.l.append(item_c_1())
else:
self.l.append(item_c_2())
c = container_c()
print("Model: " + ModelPrettyPrinter.print(c.get_model()))
for i in range(100):
c.randomize()
self.assertEqual(10, len(c.l))
for i,it in enumerate(c.l):
if i%2 == 0:
self.assertLess(it.a, it.b)
else:
self.assertGreater(it.a, it.b)
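# Optional entry point sketch so the tests above can be run directly; it assumes
# VscTestCase ultimately derives from unittest.TestCase.
if __name__ == "__main__":
    import unittest
    unittest.main()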
|
[
"matt.ballance@gmail.com"
] |
matt.ballance@gmail.com
|
fb20a737b4b3bc2e0a86a1ea9b5a7945456c6851
|
dacdebab897f9287f37a2e85c5705a926ddd36aa
|
/tests/test_issue930/Snakefile
|
06cbf60fd181788b35dd44ff28d8bc6855f13952
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
snakemake/snakemake
|
5d4528193d87786d7b372ca7653ece302ff46965
|
27b224ed12448df8aebc7d1ff8f25e3bf7622232
|
refs/heads/main
| 2023-09-02T08:37:04.323976
| 2023-08-11T10:02:34
| 2023-08-11T10:02:34
| 212,840,200
| 1,941
| 536
|
MIT
| 2023-09-11T09:51:44
| 2019-10-04T14:58:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 646
|
samples = ["0","1"]
rule all:
input:
"test.out"
rule build_index:
output:
"large_reference_index"
shell:
"touch {output}"
rule a:
output:
"a/{sample}.out"
group:
"sample_group"
shell:
"touch {output}"
rule b:
input:
rules.a.output,
rules.build_index.output
output:
"b/{sample}.out"
group:
"sample_group"
shell:
"touch {output}"
rule c:
input:
expand("a/{sample}.out", sample=samples),
expand("b/{sample}.out", sample=samples)
output:
"test.out"
shell:
"touch {output}"
|
[
"johannes.koester@tu-dortmund.de"
] |
johannes.koester@tu-dortmund.de
|
|
8c6f5b33b6a30d003e781cf744afdf6b61f3b51e
|
68ddfb2d1dad5399cb224a1c2a5f7cd5aa87ebf7
|
/HW04/HW04.py
|
de3167147296186ffd9e025bad32324202ad8683
|
[] |
no_license
|
VanSubstance/data_Analysis
|
d78d4724d0521cb954bcee624d646c61d37dc9a1
|
0dd6e2689c9b576dd39af1660ef70f8e13dfb2f3
|
refs/heads/master
| 2023-01-09T23:16:41.576145
| 2020-11-17T13:58:43
| 2020-11-17T13:58:43
| 294,380,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,728
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 21:21:00 2020
@author: sungh
"""
#%% Initiating
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from dateutil.parser import parse
from scipy import stats, polyval
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from sklearn.model_selection import cross_val_score as cvs
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
header = 0, dtype={'StateHoliday':'str'})
store=pd.read_csv('https://drive.google.com/uc?export=download&id=1_o04Vnqzo3v-MTk20MF3OMw2QFz0Fbo0')
tgt = 'Sales'
train.columns
vals = ['Store', 'DayOfWeek', 'Date', 'Customers', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday']
#%% Conclusion
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
#%% Cross validation -> Failed
C_s = np.logspace(-10, 0, 10)
logistic = LogisticRegression()
skf = StratifiedKFold(n_splits = 5, shuffle = True, random_state = 100)
kf = KFold(n_splits = 3, shuffle = True, random_state = 100)
Xtest[0:236380]
ytest[0:236380]
score = cvs(logistic, Xtrain, ytrain, cv = kf)
accs = []
for c in C_s:
logistic.C = c
temp = []
print("C!\t")
for Ptrain, Ptest in skf.split(Xtest, ytest):
print("Fit!\t")
        logistic.fit(Xtest[Ptrain], ytest[Ptrain])
        temp.append(logistic.score(Xtest[Ptest], ytest[Ptest]))
print("Append!\n")
accs.append(temp)
accs = np.array(accs)
avg = np.mean(accs, axis = 1)
C_s[np.argmax(avg)]
#%% Learning Method: Linear Regression
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
header = 0, dtype={'StateHoliday':'str'})
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
lin1 = LinearRegression()
lin1.fit(Xtrain, ytrain)
lin1.score(Xtrain, ytrain)
y_pred = lin1.predict(Xtest)
(ytrain == lin1.predict(Xtrain))
(ytest == lin1.predict(Xtest))
y_true = ytest
sse = sum((y_true - y_pred) ** 2)
sst = sum((y_true - np.mean(y_true)) ** 2)
ssr = sst - sse
adj_r2_02 = 1 - (sse / sst)
plt.figure(figsize = (36, 4))
plt.scatter(range(len(ytest)), ytest, marker = 'x')
plt.scatter(range(len(ytest)), y_pred, marker = 'x')
plt.figure(figsize = (12, 8))
plt.scatter(Xtest[:, 2], y_pred, marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(Xtest[:, 2], y_pred)
ry = polyval([slope, intercept], Xtest[:, 2])
plt.plot(Xtest[:, 2], ry, 'r')
#%% Logistic Regression -> Failed -> MemoryError
import gc
gc.collect()
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
header = 0, dtype={'StateHoliday':'str'})
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
lin2 = LogisticRegression()
lin2.fit(Xtrain, ytrain)
lin2.score(Xtrain, ytrain)
y_pred = lin2.predict(Xtest)
(ytrain == lin2.predict(Xtrain))
(ytest == lin2.predict(Xtest))
plt.figure(figsize = (36, 4))
plt.scatter(range(len(ytest)), ytest, marker = 'x')
plt.scatter(range(len(ytest)), y_pred, marker = 'x')
plt.figure(figsize = (12, 8))
plt.scatter(Xtest[:, 0], y_pred, marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(Xtest[:, 0], y_pred)
ry = polyval([slope, intercept], Xtest[:, 0])
plt.plot(Xtest[:, 0], ry, 'r')
#%% KNeighborsRegressor
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
header = 0, dtype={'StateHoliday':'str'})
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
lin2 = KNeighborsRegressor(n_neighbors = 3, weights = "distance")
lin2.fit(Xtrain, ytrain)
lin2.score(Xtrain, ytrain)
y_pred = lin2.predict(Xtest)
(ytrain == lin2.predict(Xtrain))
(ytest == lin2.predict(Xtest))
plt.figure(figsize = (36, 4))
plt.scatter(range(len(ytest)), ytest, marker = 'x')
plt.scatter(range(len(ytest)), y_pred, marker = 'x')
plt.figure(figsize = (12, 8))
plt.scatter(Xtest[:, 2], y_pred, marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(Xtest[:, 2], y_pred)
ry = polyval([slope, intercept], Xtest[:, 2])
plt.plot(Xtest[:, 2], ry, 'b')
#%% Time series Analysis -> VAR
import statsmodels.api as sm
var1 = sm.tsa.VAR(Xtrain)
result1 = var1.fit()
result1.summary()
result1.forecast(result1.model.endog[-1:], 10)
#%% Time series Analysis -> AR
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error
#%% Only the univariate case is implemented
#%% 'Date' and 'Sales'
model = AR(Xtrain)
model_fit = model.fit()
#%% Open -> Select
a = []
for date, week in train.groupby('Open'):
a.append(week['Sales'])
plt.figure()
plt.boxplot(a)
#%% Promo -> Discard
train['Promo'].unique()
train.groupby('Promo')['Sales'].var()
means = train.groupby('Promo')['Sales'].mean()
std = train.groupby('Promo')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
train[['Promo', 'Sales']].corr()
plt.figure(figsize = (12, 8))
plt.scatter(train['Promo'], train['Sales'], marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(train['Promo'], train['Sales'])
ry = polyval([slope, intercept], train['Promo'])
plt.plot(train['Promo'], ry, 'r')
a = []
for date, week in train.groupby('Promo'):
a.append(week['Sales'])
plt.figure()
plt.boxplot(a)
#%% Customers -> Select
train[['Customers', 'Sales']].corr()
plt.figure(figsize = (12, 8))
plt.scatter(train['DayOfWeek'], train['Sales'], marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(train['DayOfWeek'], train['Sales'])
ry = polyval([slope, intercept], train['DayOfWeek'])
plt.plot(train['DayOfWeek'], ry, 'y')
#%% DayOfWeek -> Select
test = ['DayOfWeek']
train.groupby('DayOfWeek')['Sales'].describe()
a = []
means = [0]
for date, week in train.groupby('DayOfWeek'):
a.append(week['Sales'])
means.append(week['Sales'].mean())
plt.figure()
plt.boxplot(a)
plt.plot(means)
plt.show()
means = train.groupby('DayOfWeek')['Sales'].mean()
std = train.groupby('DayOfWeek')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
#%% State Holiday -> Discard
means = train.groupby('StateHoliday')['Sales'].mean()
std = train.groupby('StateHoliday')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
## Run this block
train['StateHoliday'].unique()
holiday = (train['StateHoliday'] == "0") | (train['StateHoliday'] == 0)
holiday = holiday.astype(int)
train = train.drop(['StateHoliday'], axis = 1)
train = pd.concat((train, holiday), axis = 1)
#### Up to here
#%% Correlation Graph
corr = train.corr()
fig=plt.figure(figsize=(12,8))
cax=plt.imshow(corr, vmin=-1, vmax=1, cmap=plt.cm.RdBu)
ax=plt.gca()
ax.set_xticks(range(len(corr)))
ax.set_yticks(range(len(corr)))
ax.set_xticklabels(corr,fontsize=10,rotation='vertical')
ax.set_yticklabels(corr,fontsize=10)
plt.colorbar(cax)
train[['StateHoliday', 'Sales']].corr()
train[train['Open'] == 1]['Sales'].describe()
train[(train['Open'] == 1) & (train['Sales'] > 8360)].count()
means = train.groupby('Open')['Sales'].mean()
std = train.groupby('Open')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
train[train['Open'] == 1]
plt.figure()
plt.boxplot(train[train['Open'] == 1]['Sales'])
#%% School Holiday -> Discard
means = train.groupby('SchoolHoliday')['Sales'].mean()
std = train.groupby('SchoolHoliday')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
"""
plt.plot_date(train['Date'], train['Sales'])
plt.figure(figsize = (20, 1))
plt.plot(train['Date'], train['Sales'], linewidth = 1)
"""
|
[
"50601968+VanSubstance@users.noreply.github.com"
] |
50601968+VanSubstance@users.noreply.github.com
|
6ffabdb437b2f0229262f2a7b57b5eb2b66df757
|
beb12cce69e21804a9ec4d64062bf6bb062261aa
|
/bin/EAFP.py
|
74646c34e932b3821298f5c393f4bebacf076c1c
|
[] |
no_license
|
voyeg3r/dotfaster
|
f7a0cad32ea3420417cd728be24a58533cb907fa
|
90c4f1ec4471668fec1f4db755158058fb533be2
|
refs/heads/master
| 2021-01-02T22:49:47.246952
| 2018-06-02T20:56:58
| 2018-06-02T20:56:58
| 99,405,357
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
#!/usr/bin/env python3
# # -*- coding: UTF-8 -*-"
# ------------------------------------------------
# Creation Date: 23-03-2017
# Last Change: ter 29 nov 2016 09:21:52 BRT
# File: EAFP.py
# author: sergio luiz araujo silva
# site: http://vivaotux.blogspot.com
# twitter: @voyeg3r
# ------------------------------------------------
'''
This script attempts to show the concept of:
It is easyer to ask forgiveness than permission
'''
person = {'name': 'Jess', 'age': 23, 'job': 'Programmer'}
try:
print("I'm {name}. I'm {age} years old and I'm {job}".format(**person))
except KeyError as e:
print(f"Missing {e} key")
|
[
"voyeg3r@gmail.com"
] |
voyeg3r@gmail.com
|
530d9a1a9c81e48861a573078a5fcca53d28e741
|
e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67
|
/azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/network/v2017_06_01/models/network_interface_association.py
|
56f1d3b0eda3f4acd5b0007f57df14bfd8f42f49
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-cellars
|
59051e496ed0e68d14e0d5d91367a2c92c95e1fb
|
49a477d42f081e52f4c5bdd39535156a2df52d09
|
refs/heads/master
| 2022-12-25T19:28:29.992466
| 2017-10-10T13:00:08
| 2017-10-10T13:00:08
| 96,081,471
| 3
| 1
| null | 2022-12-17T02:26:21
| 2017-07-03T07:17:34
| null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterfaceAssociation(Model):
"""Network interface and its custom security rules.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Network interface ID.
:vartype id: str
:param security_rules: Collection of custom security rules.
:type security_rules: list of :class:`SecurityRule
<azure.mgmt.network.v2017_06_01.models.SecurityRule>`
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
}
def __init__(self, security_rules=None):
self.id = None
self.security_rules = security_rules
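# A minimal, hypothetical usage sketch (requires the msrest package): only
# security_rules is settable here; id is read-only and stays None until the
# service populates it.
if __name__ == "__main__":
    association = NetworkInterfaceAssociation(security_rules=[])
    print(association.id, association.security_rules)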
|
[
"Raliclo@gmail.com"
] |
Raliclo@gmail.com
|
40704cee49a3949e9dcf543e0695bacb829c017f
|
e885c02621101ea646c9dcc3e934dd7ceaaf4f04
|
/djangocms_disqus/migrations/0001_initial.py
|
7be273f44c0b09ed5f6447a8d57db12cadbb0691
|
[
"BSD-3-Clause"
] |
permissive
|
mishbahr/djangocms-disqus
|
40421d6662ef911542287fc0c2e8b81a63e49667
|
49e75a024e2ca1c932a8b9134500c2f24137a153
|
refs/heads/master
| 2023-01-05T00:46:39.514178
| 2017-05-23T22:15:12
| 2017-05-23T22:15:12
| 42,411,019
| 21
| 5
|
BSD-3-Clause
| 2022-12-26T19:52:38
| 2015-09-13T20:07:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,804
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from connected_accounts.fields import AccountField
from ..conf import settings
class Migration(migrations.Migration):
dependencies = [
('connected_accounts', '__latest__'),
('cms', '__latest__'),
]
operations = [
migrations.CreateModel(
name='Disqus',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('shortname', models.CharField(help_text='Select a website Or register a new one on the Disqus website. https://disqus.com/admin/signup/', max_length=150, verbose_name='Shortname')),
('enable_sso', models.BooleanField(default=False, help_text='Allows users to log in to Disqus via your site.', verbose_name='Enable Single Sign-On')),
('load_event', models.CharField(default=settings.DJANGOCMS_DISQUS_LOADING_CHOICES[0][0], max_length=100, verbose_name='Load Disqus', choices=settings.DJANGOCMS_DISQUS_LOADING_CHOICES)),
('site_name', models.CharField(help_text='Used for the SSO login button.', max_length=100, verbose_name='Site Name', blank=True)),
('button_text', models.CharField(help_text='By default it will be "Load Comments..."', max_length=100, verbose_name='Button Text', blank=True)),
('account', AccountField(verbose_name='Connected Account', to='connected_accounts.Account', provider='disqus', help_text='Select a connected Disqus account or connect to a new account.')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
[
"mishbah@jp74.com"
] |
mishbah@jp74.com
|
4f56cee030454bf7d814b2615a38c73539bcce37
|
d186f9763a16cddc161568728827636a8b68f2f2
|
/src/grpc_service/service_pb2_grpc.py
|
37cda993f81dc828c5dfc5ef4100daddd986874b
|
[] |
no_license
|
xvicmanx/machine-learning
|
12fce38a70b88132d633f8956435d72fc3fee050
|
8389125e8a0f41c3c803bdfa94f5483ab30897d1
|
refs/heads/main
| 2023-02-11T19:35:43.298423
| 2021-01-06T12:59:29
| 2021-01-06T12:59:29
| 308,706,331
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,434
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import service_pb2 as service__pb2
class MachineLearningStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.PredictSalary = channel.unary_unary(
'/machine_learning.MachineLearning/PredictSalary',
request_serializer=service__pb2.PredictSalaryRequest.SerializeToString,
response_deserializer=service__pb2.PredictSalaryResponse.FromString,
)
self.PredictPurchase = channel.unary_unary(
'/machine_learning.MachineLearning/PredictPurchase',
request_serializer=service__pb2.PredictPurchaseRequest.SerializeToString,
response_deserializer=service__pb2.PredictPurchaseResponse.FromString,
)
self.PredictSegment = channel.unary_unary(
'/machine_learning.MachineLearning/PredictSegment',
request_serializer=service__pb2.PredictSegmentRequest.SerializeToString,
response_deserializer=service__pb2.PredictSegmentResponse.FromString,
)
self.GetOptimalCampaignAdOption = channel.unary_unary(
'/machine_learning.MachineLearning/GetOptimalCampaignAdOption',
request_serializer=service__pb2.GetOptimalCampaignAdOptionRequest.SerializeToString,
response_deserializer=service__pb2.GetOptimalCampaignAdOptionResponse.FromString,
)
self.PredictReviewOutcome = channel.unary_unary(
'/machine_learning.MachineLearning/PredictReviewOutcome',
request_serializer=service__pb2.PredictReviewOutcomeRequest.SerializeToString,
response_deserializer=service__pb2.PredictReviewOutcomeResponse.FromString,
)
self.PredictBankLeaving = channel.unary_unary(
'/machine_learning.MachineLearning/PredictBankLeaving',
request_serializer=service__pb2.PredictBankLeavingRequest.SerializeToString,
response_deserializer=service__pb2.PredictBankLeavingResponse.FromString,
)
self.PredictCatOrDog = channel.unary_unary(
'/machine_learning.MachineLearning/PredictCatOrDog',
request_serializer=service__pb2.PredictCatOrDogRequest.SerializeToString,
response_deserializer=service__pb2.PredictCatOrDogResponse.FromString,
)
class MachineLearningServicer(object):
"""Missing associated documentation comment in .proto file."""
def PredictSalary(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictPurchase(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictSegment(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOptimalCampaignAdOption(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictReviewOutcome(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictBankLeaving(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictCatOrDog(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MachineLearningServicer_to_server(servicer, server):
rpc_method_handlers = {
'PredictSalary': grpc.unary_unary_rpc_method_handler(
servicer.PredictSalary,
request_deserializer=service__pb2.PredictSalaryRequest.FromString,
response_serializer=service__pb2.PredictSalaryResponse.SerializeToString,
),
'PredictPurchase': grpc.unary_unary_rpc_method_handler(
servicer.PredictPurchase,
request_deserializer=service__pb2.PredictPurchaseRequest.FromString,
response_serializer=service__pb2.PredictPurchaseResponse.SerializeToString,
),
'PredictSegment': grpc.unary_unary_rpc_method_handler(
servicer.PredictSegment,
request_deserializer=service__pb2.PredictSegmentRequest.FromString,
response_serializer=service__pb2.PredictSegmentResponse.SerializeToString,
),
'GetOptimalCampaignAdOption': grpc.unary_unary_rpc_method_handler(
servicer.GetOptimalCampaignAdOption,
request_deserializer=service__pb2.GetOptimalCampaignAdOptionRequest.FromString,
response_serializer=service__pb2.GetOptimalCampaignAdOptionResponse.SerializeToString,
),
'PredictReviewOutcome': grpc.unary_unary_rpc_method_handler(
servicer.PredictReviewOutcome,
request_deserializer=service__pb2.PredictReviewOutcomeRequest.FromString,
response_serializer=service__pb2.PredictReviewOutcomeResponse.SerializeToString,
),
'PredictBankLeaving': grpc.unary_unary_rpc_method_handler(
servicer.PredictBankLeaving,
request_deserializer=service__pb2.PredictBankLeavingRequest.FromString,
response_serializer=service__pb2.PredictBankLeavingResponse.SerializeToString,
),
'PredictCatOrDog': grpc.unary_unary_rpc_method_handler(
servicer.PredictCatOrDog,
request_deserializer=service__pb2.PredictCatOrDogRequest.FromString,
response_serializer=service__pb2.PredictCatOrDogResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'machine_learning.MachineLearning', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MachineLearning(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def PredictSalary(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictSalary',
service__pb2.PredictSalaryRequest.SerializeToString,
service__pb2.PredictSalaryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictPurchase(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictPurchase',
service__pb2.PredictPurchaseRequest.SerializeToString,
service__pb2.PredictPurchaseResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictSegment(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictSegment',
service__pb2.PredictSegmentRequest.SerializeToString,
service__pb2.PredictSegmentResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetOptimalCampaignAdOption(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/GetOptimalCampaignAdOption',
service__pb2.GetOptimalCampaignAdOptionRequest.SerializeToString,
service__pb2.GetOptimalCampaignAdOptionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictReviewOutcome(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictReviewOutcome',
service__pb2.PredictReviewOutcomeRequest.SerializeToString,
service__pb2.PredictReviewOutcomeResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictBankLeaving(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictBankLeaving',
service__pb2.PredictBankLeavingRequest.SerializeToString,
service__pb2.PredictBankLeavingResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictCatOrDog(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictCatOrDog',
service__pb2.PredictCatOrDogRequest.SerializeToString,
service__pb2.PredictCatOrDogResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
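# A hypothetical client sketch: it assumes a server for this service listens on
# localhost:50051; the empty PredictSalaryRequest is a placeholder because the
# request fields are defined in service_pb2, which is not shown here.
if __name__ == "__main__":
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = MachineLearningStub(channel)
        response = stub.PredictSalary(service__pb2.PredictSalaryRequest())
        print(response)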
|
[
"vic3jo@gmail.com"
] |
vic3jo@gmail.com
|
c4a1df2d9ae8ee97feb1e460d630361ef6d293ba
|
6c3dd7bbac078d9a83554333f9a3f880006f6caa
|
/src/ec2/ec2.py
|
44208cc321beda870456ff497fbdb167c7e27775
|
[] |
no_license
|
syck40/boto
|
2ceefb61d2ab2cc3ab42de6783828359cc30f550
|
dca6543400a02633f849ffc545ef0c2cc3c71a51
|
refs/heads/master
| 2020-05-03T12:36:00.456702
| 2019-03-31T06:59:57
| 2019-03-31T06:59:57
| 178,630,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
class EC2:
def __init__(self, client):
self._client = client
""":type:pyboto3.ec2"""
def create_key_pair(self, key_name):
print('Creating key pair with name '+key_name)
return self._client.create_key_pair(KeyName=key_name)
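# A minimal, hypothetical usage sketch: it assumes boto3 is installed and AWS
# credentials are configured; the key pair name below is made up.
if __name__ == "__main__":
    import boto3
    ec2 = EC2(boto3.client("ec2"))
    key_pair = ec2.create_key_pair("example-key-pair")
    print(key_pair.get("KeyName"))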
|
[
"syck40@gmail.com"
] |
syck40@gmail.com
|
4ceb508de96190a7e0a24c04b217aef38ed63e63
|
fb9722f0bf9556f5c04ba5c2795a7c23e7bff7ca
|
/lista.py
|
e6605f71cc764640f8d592c6ae6c6a4b54c215bb
|
[] |
no_license
|
anastasiacebotari15/List
|
d59aad164bf082537bed6f86fb3bba087e1a5e22
|
432dcd0fd6b3b0369b843da71586cd073476d770
|
refs/heads/main
| 2023-02-21T08:54:17.280665
| 2021-01-25T20:04:14
| 2021-01-25T20:04:14
| 332,862,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
x=[-1,0,-5,-7,-6,5,6,7,9,2,-3]
lista1=x
print('lista1=', lista1)
lista2=sorted(x)
print('lista2=', lista2)
x.sort(reverse=True)
lista3=x
print('lista3=', lista3)
print(len(x))
print('nr maxim=', max(x))
print('nr minim=', min(x))
x.extend([111])
print('lista4=', x)
x.insert(1,222)
x.remove(111)
print('lista5=', x)
|
[
"noreply@github.com"
] |
noreply@github.com
|
7d10a0ba89d020ea8778672c530012d3496bb89b
|
0ab5b15d1b97b9d72a9e4218ad6b7377c26e76ec
|
/tkContacts_LAB15.py
|
c4c4c3d8fbfbf064790aa63503f585440122fa65
|
[] |
no_license
|
RagggySu/-Sample-work-from-other-person-Portfolio
|
3beb01e18b5ace8858bb73eb9aad76e67c87d94b
|
8f5b6d2f3f4d82435cd166d6f4c038ae7352e59c
|
refs/heads/main
| 2023-05-05T06:50:13.906847
| 2021-05-28T18:45:05
| 2021-05-28T18:45:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,481
|
py
|
# Programmer: James Aniciete
# Course No.: CSC 157
# Lab No.: 15
# Date: 5/9/2020
from tkinter import *
from tkinter import messagebox # for exit button's messagebox
import os # for exiting the app
import myDatabasefile as dbf
import sqlite3
# create table
dbf.createTable()
# get contactlist
contactlist = dbf.selectAll()
# function to check for valid data entries
def validate(s): # s for string
    if s.strip() != "":
return True
else:
return False
# function to get the selection from the listbox
def selection():
return int(select.curselection()[0])
# function to add a contact
def addContact():
if validate(nameVar.get()) == True and validate(phoneVar.get()) == True:
dbf.insert(nameVar.get(), phoneVar.get())
canRoll = True
# refresh the GUI
refresh()
elif validate(nameVar.get()) == False:
print("Error: Enter a name.")
elif validate(phoneVar.get()) == False:
print("Error: Enter a phone number.")
else:
print("Error: Contact not added.\nMake sure that the Name and Phone fields are filled.")
# function to update a contact
def updateContact():
if validate(nameVar.get()) == True and validate(phoneVar.get()) == True:
dbf.update(oName, oPhone, nameVar.get(), phoneVar.get())
canRoll = True
# refresh the GUI
refresh()
elif validate(nameVar.get()) == False:
print("Error: Enter a name.")
elif validate(phoneVar.get()) == False:
print("Error: Enter a phone number.")
else:
print("Error: Contact not updated.\nMake sure a contact is selected and that the Name and Phone fields are filled.")
# function to delete a contact
def deleteContact():
try:
if messagebox.askokcancel(title = "Delete Contact",
message = f"Are you sure you want to delete {contactlist[selection()][0]}'s contact information?") == 1:
dbf.delete(nameVar.get(), phoneVar.get())
canRoll = True
refresh()
except:
print("Error: Select a contact to be deleted.")
# function to load a contact
def loadContact():
try:
        # remember the originally selected name/phone so updateContact() can find the record to change
global oName, oPhone
oName = contactlist[selection()][0]
oPhone = contactlist[selection()][1]
# put name and phone selections into a tuple
name, phone = contactlist[selection()]
# use tuple to assign values to name and phone variables
nameVar.set(name)
phoneVar.set(phone)
except:
print("Error: Select a contact from the list.")
# function to rollback a change
def rollback():
global canRoll
if canRoll == True:
if (messagebox.askokcancel(title = "Rollback", message = "Would you like to undo the previous change?") == 1):
dbf.rollback()
refresh()
canRoll = False
# function to exit the program
def exitContact():
app_title = "Contacts"
if messagebox.askokcancel(title = app_title, message = "Do you want to exit, OK or Cancel") == 1:
# commit and close the database
dbf.db.commit()
dbf.db.close()
os._exit(1)
# function that places all widgets into the frame individually
def buildFrame () :
# define global variables
global nameVar, phoneVar, select
# create the main window widget
root = Tk()
# add title to the frame
root.title("My Contact List")
# create & pack a frame in the root window
frame1 = Frame(root)
frame1.pack()
# on 1st row of frame:
# create a label for name
Label(frame1, text="Name:").grid(row=0, column=0, sticky=W)
# initialize StringVar for name
nameVar = StringVar()
# assign entry button value to the name var
name = Entry(frame1, textvariable=nameVar)
# position name var in first row, second column, aligned to the west cell border
name.grid(row=0, column=1, sticky=W)
# on 2nd row of the frame:
# create a label for phone no.
Label(frame1, text="Phone:").grid(row=1, column=0, sticky=W)
# create string var for phone no.
phoneVar= StringVar()
# assign entry button value to phone var
phone= Entry(frame1, textvariable=phoneVar)
# position phone var in second row, second column, aligned to the west
phone.grid(row=1, column=1, sticky=W)
# create & pack a frame in the root window
frame1 = Frame(root)
frame1.pack()
# add a row of buttons to frame1 with respective callback functions
btn1 = Button(frame1,text=" Add ",command=addContact)
btn2 = Button(frame1,text="Update",command=updateContact)
btn3 = Button(frame1,text="Delete",command=deleteContact)
btn4 = Button(frame1,text=" Load ",command=loadContact)
btn5 = Button(frame1,text="Rollback",command=rollback)
# pack the buttons on the same row to the left
btn1.pack(side=LEFT)
btn2.pack(side=LEFT)
btn3.pack(side=LEFT)
btn4.pack(side=LEFT)
btn5.pack(side=LEFT)
# allow for selection of names from a ListBox with a scrollbar
frame1 = Frame(root)
frame1.pack()
# create a vertical bar widget
scroll = Scrollbar(frame1, orient=VERTICAL)
# whichever value from the ListBox is clicked is assigned to select
# height = # of values visible in the Listbox
select = Listbox(frame1, yscrollcommand=scroll.set, height=8)
scroll.config (command=select.yview)
scroll.pack(side=RIGHT, fill=Y)
select.pack(side=LEFT, fill=BOTH)
# create frame for Exit button at the bottom of the window
frame2 = Frame(root)
frame2.pack()
# create exit button & pack it
btn6 = Button(frame2, text = " Exit ", command = exitContact)
btn6.pack()
# return root object to allow for the frame to be built
return root
# sorts the contact list & allows for an update to the ListBox
def setList():
contactlist.sort()
# delete all elements from the select element
select.delete(0, END)
# insert each name from the list to the end of the select element
for name, phone in contactlist:
select.insert(END, name)
# refresh function - used add the end of add, update, delete functions
def refresh():
global canRoll, contactlist
canRoll = True
contactlist = dbf.selectAll()
setList()
# initialize the application
root = buildFrame()
setList()
# set size of window (width x height)
root.geometry("300x225")
root.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
234e009d0b1fedd51b2692aa9e1401871a904c8e
|
5b05b2a15e5ad633f4f87124a5eff0d662af6e3c
|
/CONTEST-DIV2/Round 714/B/B.py
|
146cdb225283534f6605ea8c5721ab243cda0f83
|
[] |
no_license
|
CristianLazoQuispe/CODEFORCES-Contest
|
505eaf7d4dd3473a07ba828ab614f4c504fbc853
|
27f5c490e59d90437833369c32d5a8dd042b262f
|
refs/heads/main
| 2023-04-27T13:26:17.608905
| 2021-05-07T20:27:12
| 2021-05-07T20:27:12
| 355,346,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
import functools,operator
T = int(input())
def solve(lista):
ida = []
vuelta = []
maxi = len(lista)
back = None
for i in range(len(lista)):
if back is None:
value = functools.reduce(operator.and_, [lista[i]])
ida.append(value)
back = value
else:
value = functools.reduce(operator.and_, [value,lista[i]])
ida.append(value)
back = value
back = None
for i in range(len(lista)):
i = maxi-i-1
if back is None:
value = functools.reduce(operator.and_, [lista[i]])
vuelta.append(value)
back = value
else:
value = functools.reduce(operator.and_, [value,lista[i]])
vuelta.append(value)
back = value
suma = 0
for idx,ida_i in enumerate(ida):
if vuelta[maxi-idx-1] == ida_i:
suma+=1
print(idx,ida_i)
return suma
for i in range(T):
n = int(input())
lista = list(map(int,input().split()))
ans = solve(lista)
print(ans)
|
[
"mecatronico.lazo@gmail.com"
] |
mecatronico.lazo@gmail.com
|
61a49f9ce140730c3fb6b664ca5ac5bc8085cfb0
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/types/media_file_service.py
|
d18d6a8d09b03c92f8310398e3c6a6a1be1ac137
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,355
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v6.resources.types import media_file as gagr_media_file
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.services',
marshal='google.ads.googleads.v6',
manifest={
'GetMediaFileRequest',
'MutateMediaFilesRequest',
'MediaFileOperation',
'MutateMediaFilesResponse',
'MutateMediaFileResult',
},
)
class GetMediaFileRequest(proto.Message):
r"""Request message for
[MediaFileService.GetMediaFile][google.ads.googleads.v6.services.MediaFileService.GetMediaFile]
Attributes:
resource_name (str):
Required. The resource name of the media file
to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateMediaFilesRequest(proto.Message):
r"""Request message for
[MediaFileService.MutateMediaFiles][google.ads.googleads.v6.services.MediaFileService.MutateMediaFiles]
Attributes:
customer_id (str):
Required. The ID of the customer whose media
files are being modified.
operations (Sequence[google.ads.googleads.v6.services.types.MediaFileOperation]):
Required. The list of operations to perform
on individual media file.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v6.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MediaFileOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class MediaFileOperation(proto.Message):
r"""A single operation to create media file.
Attributes:
create (google.ads.googleads.v6.resources.types.MediaFile):
Create operation: No resource name is
expected for the new media file.
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_media_file.MediaFile,
)
class MutateMediaFilesResponse(proto.Message):
r"""Response message for a media file mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v6.services.types.MutateMediaFileResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateMediaFileResult',
)
class MutateMediaFileResult(proto.Message):
r"""The result for the media file mutate.
Attributes:
resource_name (str):
The resource name returned for successful
operations.
media_file (google.ads.googleads.v6.resources.types.MediaFile):
The mutated media file with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
media_file = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_media_file.MediaFile,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
ee235f82c46f75248d18f091913758a6b068b1f9
|
87b2725ccb7509cda0d4f719647192c34bbf7471
|
/HistogramPlot.py
|
e5c1ce1adf7878de50ebd4567ee1dabb94e7efd0
|
[] |
no_license
|
sumeyyeakay/CoronaVirusDataAnalysis
|
f88a5c9698cd6867059a91b5750f4bd14f414d62
|
45f4b386b95ed2143d96940e74bdc41854cba466
|
refs/heads/master
| 2022-09-09T02:19:35.034587
| 2020-06-01T15:17:18
| 2020-06-01T15:17:18
| 268,553,637
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 17:44:03 2020
@author: sumeyyeakay
Histogram plots
"""
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv("covid_19_data.csv")
turkiye = df[df["Country/Region"] == "Turkey"]
italya = df[df["Country/Region"] == "Italy"]
ispanya = df[df["Country/Region"] == "Spain"]
plt.hist(italya.Deaths,bins=10)
plt.xlabel("Olum Sayisi")
plt.ylabel(" Kurtulan Hasta Sayisi")
plt.title("Italya Coronovirus Analizi")
plt.show()
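# The turkiye and ispanya frames are loaded above but never plotted; this optional
# sketch repeats the same histogram for them, assuming the same CSV columns.
for frame, country in ((turkiye, "Turkey"), (ispanya, "Spain")):
    plt.hist(frame.Deaths, bins=10)
    plt.xlabel("Deaths")
    plt.ylabel("Frequency")
    plt.title(country + " Coronavirus Analysis")
    plt.show()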
|
[
"sumeyyeakayy@gmail.com"
] |
sumeyyeakayy@gmail.com
|
54a7a8cba0c76261822e8420ebdd9b22a638ba22
|
1ba12eb2be477e7dc99b4f13d1014917e78199aa
|
/usr/lib/solydxk/constructor/solydxk.py
|
89f79749e8211f426ccb25c69f76882e3d7ac50e
|
[] |
no_license
|
KDB2/solydxk-constructor
|
0704f5ce5ef331f45888348804936cfcf4c43f25
|
c05b8c38b873bb36eb3c8d3160600f45d5cd4798
|
refs/heads/master
| 2021-01-17T06:31:41.055358
| 2015-11-03T16:02:32
| 2015-11-03T16:02:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,011
|
py
|
#! /usr/bin/env python3
import re
import threading
from os import remove, rmdir, makedirs, system, listdir
from shutil import copy, move
from datetime import datetime
from execcmd import ExecCmd
from os.path import join, exists, basename, abspath, dirname, lexists, isdir
class IsoUnpack(threading.Thread):
def __init__(self, mountDir, unpackIso, unpackDir, queue):
threading.Thread.__init__(self)
self.ec = ExecCmd()
self.mountDir = mountDir
self.unpackIso = unpackIso
self.unpackDir = unpackDir
self.queue = queue
self.returnMessage = None
def run(self):
try:
if not exists(self.mountDir):
print(("Create mount directory: %s" % self.mountDir))
makedirs(self.mountDir)
rootDir = join(self.unpackDir, "root")
if not exists(rootDir):
print(("Create root directory: %s" % rootDir))
makedirs(rootDir)
isolinuxDir = join(self.unpackDir, "boot/isolinux")
if not exists(isolinuxDir):
print(("Create isolinux directory: %s" % isolinuxDir))
makedirs(isolinuxDir)
liveDir = join(self.unpackDir, "boot/live")
if not exists(liveDir):
print(("Create liveDir directory: %s" % liveDir))
makedirs(liveDir)
# Mount the ISO
system("mount -o loop '%s' '%s'" % (self.unpackIso, self.mountDir))
# Check isolinux directory
mountIsolinux = join(self.mountDir, "isolinux")
if not exists(mountIsolinux):
self.ec.run("umount --force '%s'" % self.mountDir)
self.returnMessage = "ERROR: Cannot find isolinux directory in ISO"
fixCfgCmd = None
dirs = []
mountSquashfs = None
if self.returnMessage is None:
subdirs = self.getDirectSubDirectories(self.mountDir)
for subdir in subdirs:
if self.hasSquashFs(join(self.mountDir, subdir)):
mountSquashfs = join(self.mountDir, subdir)
if subdir != "live":
fixCfgCmd = "sed -i 's/\/%s/\/live/g' %s/isolinux.cfg" % (subdir, isolinuxDir)
elif subdir != "isolinux":
dirs.append(join(self.mountDir, subdir))
if mountSquashfs is None:
self.ec.run("umount --force '%s'" % self.mountDir)
self.returnMessage = "ERROR: Cannot find squashfs directory in ISO"
if self.returnMessage is None:
# Copy files from ISO to unpack directory
for d in dirs:
self.ec.run("rsync -at --del '%s' '%s'" % (d, join(self.unpackDir, "boot/")))
self.ec.run("rsync -at --del '%s/' '%s'" % (mountIsolinux, isolinuxDir))
self.ec.run("rsync -at --del '%s/' '%s'" % (mountSquashfs, liveDir))
self.ec.run("umount --force '%s'" % self.mountDir)
if fixCfgCmd is not None:
self.ec.run(fixCfgCmd)
# copy squashfs root
squashfs = join(liveDir, "filesystem.squashfs")
if exists(squashfs):
self.ec.run("mount -t squashfs -o loop '%s' '%s'" % (squashfs, self.mountDir))
self.ec.run("rsync -at --del '%s/' '%s/'" % (self.mountDir, rootDir))
self.ec.run("umount --force '%s'" % self.mountDir)
# Cleanup
rmdir(self.mountDir)
# set proper permissions
self.ec.run("chmod 6755 '%s'" % join(rootDir, "usr/bin/sudo"))
self.ec.run("chmod 0440 '%s'" % join(rootDir, "etc/sudoers"))
self.returnMessage = "DONE - ISO unpacked to: %s" % self.unpackDir
self.queue.put(self.returnMessage)
except Exception as detail:
self.ec.run("umount --force '%s'" % self.mountDir)
rmdir(self.mountDir)
self.returnMessage = "ERROR: IsoUnpack: %(detail)s" % {"detail": detail}
self.queue.put(self.returnMessage)
def getDirectSubDirectories(self, directory):
subdirs = []
names = listdir(directory)
for name in names:
if isdir(join(directory, name)):
subdirs.append(name)
return subdirs
def hasSquashFs(self, directory):
names = listdir(directory)
for name in names:
if name == "filesystem.squashfs":
return True
return False
class BuildIso(threading.Thread):
def __init__(self, distroPath, queue):
threading.Thread.__init__(self)
self.ec = ExecCmd()
self.dg = DistroGeneral(distroPath)
self.ed = EditDistro(distroPath)
self.queue = queue
self.returnMessage = None
# Paths
distroPath = distroPath.rstrip('/')
if basename(distroPath) == "root":
distroPath = dirname(distroPath)
self.distroPath = distroPath
self.rootPath = join(distroPath, "root")
self.bootPath = join(distroPath, "boot")
self.livePath = join(self.bootPath, "live")
self.scriptDir = abspath(dirname(__file__))
# Check for old dir
oldDir = join(self.bootPath, "solydxk")
if exists(oldDir):
self.ec.run("rm -r %s" % oldDir)
# Make sure live directory exists
if not exists(self.livePath):
self.ec.run("mkdir -p %s" % self.livePath)
# ISO Name
self.isoName = self.dg.description
# ISO distribution
self.isoBaseName = self.dg.getIsoFileName()
self.isoFileName = join(self.distroPath, self.isoBaseName)
# Trackers, and webseeds
self.trackers = ""
self.webseeds = ""
trackersPath = join(self.scriptDir, "files/trackers")
webseedsPath = join(self.scriptDir, "files/webseeds")
if exists(trackersPath):
with open(trackersPath, "r") as f:
lines = f.readlines()
trList = []
for line in lines:
trList.append(line.strip())
self.trackers = ",".join(trList)
if exists(webseedsPath):
with open(webseedsPath, "r") as f:
lines = f.readlines()
wsList = []
for line in lines:
#wsList.append("%s/%s" % (line.strip(), webseedIsoName))
wsList.append("%s/%s" % (line.strip(), self.isoBaseName))
self.webseeds = ",".join(wsList)
def run(self):
try:
if not exists(self.rootPath):
self.returnMessage = "ERROR: Cannot find root directory: %s" % self.rootPath
if not exists(self.bootPath):
self.returnMessage = "ERROR: Cannot find boot directory: %s" % self.bootPath
if self.returnMessage is None:
print("======================================================")
print("INFO: Cleanup and prepare ISO build...")
print("======================================================")
# Clean-up
script = "cleanup.sh"
scriptSource = join(self.scriptDir, "files/{}".format(script))
scriptTarget = join(self.rootPath, script)
if exists(scriptSource):
self.copy_file(scriptSource, scriptTarget)
self.ec.run("chmod a+x %s" % scriptTarget)
plymouthTheme = self.dg.getPlymouthTheme()
#self.ec.run("chroot '%(rootPath)s' /bin/bash %(cleanup)s %(plymouthTheme)s" % {"rootPath": self.rootPath, "cleanup": cleanup, "plymouthTheme": plymouthTheme})
cmd = "/bin/bash %(cleanup)s %(plymouthTheme)s" % {"cleanup": script, "plymouthTheme": plymouthTheme}
self.ed.openTerminal(cmd)
remove(scriptTarget)
rootHome = join(self.rootPath, "root")
nanoHist = join(rootHome, ".nano_history")
if exists(nanoHist):
remove(nanoHist)
bashHist = join(rootHome, ".bash_history")
if exists(bashHist):
remove(bashHist)
# Config naming
regExp = "solyd.*(\d{6}|-bit)"
d = datetime.now()
dateString = d.strftime("%Y%m")
nameString = "{} {}".format(self.isoName, dateString)
# write iso name to boot/isolinux/isolinux.cfg
cfgFile = join(self.bootPath, "isolinux/isolinux.cfg")
if exists(cfgFile):
content = ""
with open(cfgFile, 'r') as f:
content = f.read()
if content != "":
content = re.sub(regExp, nameString, content, flags=re.IGNORECASE)
# Make sure that the paths are correct (correcting very old stuff)
content = re.sub('.lz', '.img', content)
content = re.sub('/solydxk/', '/live/', content)
with open(cfgFile, 'w') as f:
f.write(content)
# Write info for grub (EFI)
grubFile = join(self.bootPath, "boot/grub/grub.cfg")
if exists(grubFile):
content = ""
with open(grubFile, 'r') as f:
content = f.read()
if content != "":
content = re.sub(regExp, nameString, content, flags=re.IGNORECASE)
with open(grubFile, 'w') as f:
f.write(content)
loopbackFile = join(self.bootPath, "boot/grub/loopback.cfg")
if exists(loopbackFile):
content = ""
with open(loopbackFile, 'r') as f:
content = f.read()
if content != "":
content = re.sub(regExp, nameString, content, flags=re.IGNORECASE)
with open(loopbackFile, 'w') as f:
f.write(content)
# Clean boot/live directory
#popen("rm -rf %s/live/*" % self.bootPath)
# Vmlinuz
vmlinuzSymLink = join(self.distroPath, "root/vmlinuz")
if lexists(vmlinuzSymLink):
vmlinuzFile = self.ec.run("ls -al %s | cut -d'>' -f2" % vmlinuzSymLink)[0].strip()
else:
self.returnMessage = "ERROR: %s not found" % vmlinuzSymLink
if self.returnMessage is None:
vmlinuzPath = join(self.distroPath, "root/%s" % vmlinuzFile)
if exists(vmlinuzPath):
print("Copy vmlinuz")
self.copy_file(vmlinuzPath, join(self.livePath, "vmlinuz"))
else:
self.returnMessage = "ERROR: %s not found" % vmlinuzPath
if self.returnMessage is None:
# Initrd
initrdSymLink = join(self.distroPath, "root/initrd.img")
if lexists(initrdSymLink):
initrdFile = self.ec.run("ls -al %s | cut -d'>' -f2" % initrdSymLink)[0].strip()
else:
self.returnMessage = "ERROR: %s not found" % initrdSymLink
if self.returnMessage is None:
initrdPath = join(self.distroPath, "root/%s" % initrdFile)
if exists(initrdPath):
print("Copy initrd")
self.copy_file(initrdPath, join(self.livePath, "initrd.img"))
else:
self.returnMessage = "ERROR: %s not found" % initrdPath
if self.returnMessage is None:
# Generate UUID
#diskDir = join(self.bootPath, ".disk")
#if not exists(diskDir):
#makedirs(diskDir)
#self.ec.run("rm -rf %s/*uuid*" % diskDir)
#self.ec.run("uuidgen -r > %s/live-uuid-generic" % diskDir)
#copy_file(join(diskDir, "live-uuid-generic"), join(diskDir, "live-uuid-generic"))
#Update filesystem.size
#self.ec.run("du -b %(directory)s/root/ 2> /dev/null | tail -1 | awk {'print $1;'} > %(directory)s/live/filesystem.size" % {"directory": self.bootPath})
print("======================================================")
print("INFO: Start building ISO...")
print("======================================================")
# build squash root
print("Creating SquashFS root...")
print("Updating File lists...")
dpkgQuery = ' dpkg -l | awk \'/^ii/ {print $2, $3}\' | sed -e \'s/ /\t/g\' '
self.ec.run('chroot \"' + self.rootPath + '\"' + dpkgQuery + ' > \"' + join(self.livePath, "filesystem.packages") + '\"' )
#dpkgQuery = ' dpkg-query -W --showformat=\'${Package} ${Version}\n\' '
#self.ec.run('chroot \"' + self.rootPath + '\"' + dpkgQuery + ' > \"' + join(self.bootPath, "live/filesystem.manifest") + '\"' )
#copy_file(join(self.bootPath, "live/filesystem.manifest"), join(self.bootPath, "live/filesystem.manifest-desktop"))
# check for existing squashfs root
if exists(join(self.livePath, "filesystem.squashfs")):
print("Removing existing SquashFS root...")
remove(join(self.livePath, "filesystem.squashfs"))
print("Building SquashFS root...")
# check for alternate mksquashfs
# check for custom mksquashfs (for multi-threading, new features, etc.)
mksquashfs = self.ec.run(cmd="echo $MKSQUASHFS", returnAsList=False).strip()
rootPath = join(self.distroPath, "root/")
squashfsPath = join(self.livePath, "filesystem.squashfs")
if mksquashfs == '' or mksquashfs == 'mksquashfs':
try:
nrprocessors = int(int(self.ec.run("nproc", False, False))/2)
if nrprocessors < 1:
nrprocessors = 1
except:
nrprocessors = 1
cmd = "mksquashfs \"{}\" \"{}\" -comp xz -processors {}".format(rootPath, squashfsPath, nrprocessors)
else:
cmd = "{} \"{}\" \"{}\"".format(mksquashfs, rootPath, squashfsPath)
#print(cmd)
self.ec.run(cmd)
# build iso
print("Creating ISO...")
# update manifest files
#self.ec.run("/usr/lib/solydxk/constructor/updateManifest.sh %s" % self.distroPath)
# update md5
print("Updating md5 sums...")
if exists(join(self.bootPath, "md5sum.txt")):
remove(join(self.bootPath, "md5sum.txt"))
if exists(join(self.bootPath, "MD5SUMS")):
remove(join(self.bootPath, "MD5SUMS"))
self.ec.run('cd \"' + self.bootPath + '\"; ' + 'find . -type f -print0 | xargs -0 md5sum > md5sum.txt')
#Remove md5sum.txt, MD5SUMS, boot.cat and isolinux.bin from md5sum.txt
self.ec.run("sed -i '/md5sum.txt/d' %s/md5sum.txt" % self.bootPath)
self.ec.run("sed -i '/MD5SUMS/d' %s/md5sum.txt" % self.bootPath)
self.ec.run("sed -i '/boot.cat/d' %s/md5sum.txt" % self.bootPath)
self.ec.run("sed -i '/isolinux.bin/d' %s/md5sum.txt" % self.bootPath)
#Copy md5sum.txt to MD5SUMS (for Debian compatibility)
self.copy_file(join(self.bootPath, "md5sum.txt"), join(self.bootPath, "MD5SUMS"))
# Update isolinux files
syslinuxPath = join(self.rootPath, "usr/lib/syslinux")
modulesPath = join(syslinuxPath, "modules/bios")
isolinuxPath = join(self.bootPath, "isolinux")
self.ec.run("chmod -R +w {}".format(isolinuxPath))
cat = join(isolinuxPath, "boot.cat")
if exists(cat):
remove(cat)
self.copy_file(join(modulesPath, "chain.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "hdt.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "libmenu.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "libgpl.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "reboot.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "vesamenu.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "poweroff.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "ldlinux.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "libcom32.c32"), isolinuxPath)
self.copy_file(join(modulesPath, "libutil.c32"), isolinuxPath)
self.copy_file(join(self.rootPath, "boot/memtest86+.bin"), join(isolinuxPath, "memtest86"))
self.copy_file("/usr/lib/ISOLINUX/isolinux.bin", isolinuxPath)
# remove existing iso
if exists(self.isoFileName):
print("Removing existing ISO...")
remove(self.isoFileName)
# build iso according to architecture
print("Building ISO...")
self.ec.run('genisoimage -input-charset utf-8 -o \"' + self.isoFileName + '\" -b \"isolinux/isolinux.bin\" -c \"isolinux/boot.cat\" -no-emul-boot -boot-load-size 4 -boot-info-table -V \"' + self.isoName + '\" -cache-inodes -r -J -l \"' + self.bootPath + '\"')
print("Making Hybrid ISO...")
self.ec.run("isohybrid %s" % self.isoFileName)
print("Create ISO md5 file...")
self.ec.run("echo \"$(md5sum \"%s\" | cut -d' ' -f 1) %s\" > \"%s.md5\"" % (self.isoFileName, self.isoBaseName, self.isoFileName))
print("Create Torrent file...")
torrentFile = "%s.torrent" % self.isoFileName
if exists(torrentFile):
remove(torrentFile)
self.ec.run("mktorrent -a \"%s\" -c \"%s\" -w \"%s\" -o \"%s\" \"%s\"" % (self.trackers, self.isoName, self.webseeds, torrentFile, self.isoFileName))
print("======================================================")
self.returnMessage = "DONE - ISO Located at: %s" % self.isoFileName
print((self.returnMessage))
print("======================================================")
self.queue.put(self.returnMessage)
except Exception as detail:
self.returnMessage = "ERROR: BuildIso: %(detail)s" % {"detail": detail}
self.queue.put(self.returnMessage)
def copy_file(self, file_path, destination):
if exists(file_path):
try:
copy(file_path, destination)
except Exception as detail:
print(("ERROR: BuildIso.copy_file: {}".format(detail)))
else:
print(("ERROR: BuildIso.copy_file: cannot find {}".format(file_path)))
# Class to create a chrooted terminal for a given directory
# https://wiki.debian.org/chroot
class EditDistro(object):
def __init__(self, distroPath):
self.ec = ExecCmd()
self.dg = DistroGeneral(distroPath)
distroPath = distroPath.rstrip('/')
if basename(distroPath) == "root":
distroPath = dirname(distroPath)
self.rootPath = join(distroPath, "root")
# ISO edition
self.edition = self.dg.edition
def openTerminal(self, command=""):
# Set some paths
resolveCnfHost = "/etc/resolv.conf"
resolveCnf = join(self.rootPath, "etc/resolv.conf")
resolveCnfBak = "%s.bak" % resolveCnf
wgetrc = join(self.rootPath, "etc/wgetrc")
wgetrcBak = "%s.bak" % wgetrc
terminal = "/tmp/constructor-terminal.sh"
lockDir = join(self.rootPath, "run/lock/")
proc = join(self.rootPath, "proc/")
dev = join(self.rootPath, "dev/")
pts = join(self.rootPath, "dev/pts/")
sys = join(self.rootPath, "sys/")
policy = join(self.rootPath, "usr/sbin/policy-rc.d")
ischroot = join(self.rootPath, "usr/bin/ischroot")
ischrootTmp = join(self.rootPath, "usr/bin/ischroot.tmp")
try:
# temporary create /run/lock
if not exists(lockDir):
makedirs(lockDir)
# setup environment
# copy dns info
if exists(resolveCnf):
move(resolveCnf, resolveCnfBak)
if exists(resolveCnfHost):
copy(resolveCnfHost, resolveCnf)
# umount /proc /dev /dev/pts /sys
self.unmount([pts, dev, proc, sys])
# mount /proc /dev /dev/pts /sys /run /sys
self.ec.run("mount --bind /proc '%s'" % proc)
self.ec.run("mount --bind /dev '%s'" % dev)
self.ec.run("mount --bind /dev/pts '%s'" % pts)
self.ec.run("mount --bind /sys '%s'" % sys)
# copy apt.conf
#copy("/etc/apt/apt.conf", join(self.rootPath, "etc/apt/apt.conf"))
# copy wgetrc
move(wgetrc, wgetrcBak)
copy("/etc/wgetrc", wgetrc)
# Let dpkg only start daemons when desired
scr = "#!/bin/sh\nexit 101\n"
with open(policy, 'w') as f:
f.write(scr)
self.ec.run("chmod a+x %s" % policy)
# Temporary fix ischroot
if not exists(ischrootTmp):
self.ec.run("mv %s %s" % (ischroot, ischrootTmp))
if not exists(ischroot):
self.ec.run("ln -s /bin/true %s" % ischroot)
# HACK: create temporary script for chrooting
if exists(terminal):
remove(terminal)
scr = "#!/bin/sh\nchroot '%s' %s\n" % (self.rootPath, command)
with open(terminal, 'w') as f:
f.write(scr)
self.ec.run("chmod a+x %s" % terminal)
if self.ec.run('which x-terminal-emulator'):
# use x-terminal-emulator if xterm isn't available
if exists("/usr/bin/xterm"):
self.ec.run('export HOME=/root ; xterm -bg black -fg white -rightbar -title \"%s\" -e %s' % (self.edition, terminal))
else:
self.ec.run('export HOME=/root ; x-terminal-emulator -e %s' % terminal)
else:
print('Error: no valid terminal found')
# restore wgetrc
move(wgetrcBak, wgetrc)
# remove apt.conf
#remove(join(self.rootPath, "root/etc/apt/apt.conf"))
# move dns info
if exists(resolveCnfBak):
move(resolveCnfBak, resolveCnf)
else:
remove(resolveCnf)
# umount /proc /dev /dev/pts /sys
self.unmount([pts, dev, proc, sys])
# remove temp script
if exists(terminal):
remove(terminal)
# remove policy script
if exists(policy):
remove(policy)
# replace ischroot
if exists("%s.tmp" % ischroot):
self.ec.run("rm %s" % ischroot)
self.ec.run("mv %s.tmp %s" % (ischroot, ischroot))
# cleanup /run
self.ec.run("rm -rf %s/run/*" % self.rootPath)
except Exception as detail:
# restore wgetrc
move(wgetrcBak, wgetrc)
# remove apt.conf
#remove(join(self.rootPath, "etc/apt/apt.conf"))
# move dns info
if exists(resolveCnfBak):
move(resolveCnfBak, resolveCnf)
else:
remove(resolveCnf)
# umount /proc /dev /dev/pts /sys
self.unmount([pts, dev, proc, sys])
# remove temp script
if exists(terminal):
remove(terminal)
# remove policy script
if exists(policy):
remove(policy)
# replace ischroot
if exists("%s.tmp" % ischroot):
self.ec.run("rm %s" % ischroot)
self.ec.run("mv %s.tmp %s" % (ischroot, ischroot))
# cleanup /run
self.ec.run("rm -rf %s/run/*" % self.rootPath)
errText = 'Error launching terminal: '
            print(errText, detail)
def unmount(self, mounts=[]):
for mount in mounts:
self.ec.run("umount --force '%s'" % mount)
self.ec.run("umount -l '%s'" % mount)
class DistroGeneral(object):
def __init__(self, distroPath):
self.ec = ExecCmd()
distroPath = distroPath.rstrip('/')
if basename(distroPath) == "root":
distroPath = dirname(distroPath)
self.distroPath = distroPath
self.rootPath = join(distroPath, "root")
self.edition = basename(distroPath)
self.description = "SolydXK"
infoPath = join(self.rootPath, "etc/solydxk/info")
if exists(infoPath):
self.edition = self.ec.run(cmd="grep EDITION= {} | cut -d'=' -f 2".format(infoPath), returnAsList=False).strip('"')
self.description = self.ec.run(cmd="grep DESCRIPTION= {} | cut -d'=' -f 2".format(infoPath), returnAsList=False).strip('"')
def getPlymouthTheme(self):
plymouthTheme = ""
if exists(join(self.rootPath, "usr/share/plymouth/themes/solydk-logo")):
plymouthTheme = "solydk-logo"
elif exists(join(self.rootPath, "usr/share/plymouth/themes/solydx-logo")):
plymouthTheme = "solydx-logo"
return plymouthTheme
def getIsoFileName(self):
# Get the date string
d = datetime.now()
serial = d.strftime("%Y%m")
# Check for a localized system
localePath = join(self.rootPath, "etc/default/locale")
if exists(localePath):
locale = self.ec.run(cmd="grep LANG= {}".format(localePath), returnAsList=False).strip('"').replace(" ", "")
            matchObj = re.search(r"=\s*([a-z]{2})", locale)
if matchObj:
language = matchObj.group(1)
if language != "en":
serial += "_{}".format(language)
isoFileName = "{}_{}.iso".format(self.description.lower().replace(' ', '_').split('-')[0], serial)
return isoFileName
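# --- Illustrative driver (hedged sketch) ---
# Assumptions: it is run as root, and "/path/to/input.iso" plus the work directories
# are placeholders. It only shows how the thread classes above are driven: each worker
# reports its result message back through a queue.
if __name__ == "__main__":
    from queue import Queue

    result_queue = Queue()

    unpack = IsoUnpack(mountDir="/mnt/constructor",
                       unpackIso="/path/to/input.iso",
                       unpackDir="/path/to/workdir",
                       queue=result_queue)
    unpack.start()
    unpack.join()
    print(result_queue.get())  # "DONE - ISO unpacked to: ..." or "ERROR: ..."

    build = BuildIso("/path/to/workdir", result_queue)
    build.start()
    build.join()
    print(result_queue.get())  # "DONE - ISO Located at: ..." or "ERROR: ..."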
|
[
"root@solydxk"
] |
root@solydxk
|
3a79fc6c3eb34308f2013497b29f90ad59a89e7b
|
fc85a54686e13e598541df14c472e8aa744e6713
|
/petisco/extra/sqlalchemy/sql/mysql/mysql_connection.py
|
ccf69974f1b0fbfe9c880d72c61912564fc1f72c
|
[
"MIT"
] |
permissive
|
alice-biometrics/petisco
|
63721751cd43e70825b161a5ece535c80d95b6fa
|
771ebe5c69dc735b8f373c2e7303d3b4eb655044
|
refs/heads/main
| 2023-09-01T03:53:23.642042
| 2023-08-25T05:38:42
| 2023-08-25T05:38:42
| 217,555,512
| 42
| 2
|
MIT
| 2023-09-12T11:06:43
| 2019-10-25T14:48:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
import os
MYSQL_DATABASE_DEFAULT = "mysql_test"
class MySqlConnection:
def __init__(
self,
server_name: str,
driver: str,
user: str,
password: str,
host: str,
port: str,
database_name: str,
url: str,
):
self.server_name = server_name
self.driver = driver
self.user = user
self.password = password
self.host = host
self.port = port
self.database_name = database_name
self.url = url
@staticmethod
def create(
server_name: str = "mysql",
driver: str = "pymysql",
user: str = "root",
password: str = "root",
host: str = "mysql",
port: str = "3306",
database_name: str = MYSQL_DATABASE_DEFAULT,
) -> "MySqlConnection":
url = (
f"{server_name}+{driver}://{user}:{password}@{host}:{port}/{database_name}"
)
return MySqlConnection(
server_name, driver, user, password, host, port, database_name, url
)
@staticmethod
def create_local(database_name: str = MYSQL_DATABASE_DEFAULT) -> "MySqlConnection":
return MySqlConnection.create(
host="localhost", port="3307", database_name=database_name
)
@staticmethod
def from_environ() -> "MySqlConnection":
return MySqlConnection.create(
"mysql",
"pymysql",
os.getenv("MYSQL_USER", "root"),
os.getenv("MYSQL_PASSWORD", "root"),
os.getenv("MYSQL_HOST", "mysql"),
os.getenv("MYSQL_PORT", "3306"),
os.getenv("MYSQL_DATABASE", MYSQL_DATABASE_DEFAULT),
)
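# --- Usage sketch (illustrative) ---
# The factory methods above only assemble a SQLAlchemy-style connection URL; nothing
# here opens a real database connection. The database name below is a placeholder.
if __name__ == "__main__":
    local = MySqlConnection.create_local(database_name="petisco_demo")
    print(local.url)  # mysql+pymysql://root:root@localhost:3307/petisco_demo
    from_env = MySqlConnection.from_environ()  # falls back to defaults when env vars are unset
    print(from_env.url)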
|
[
"noreply@github.com"
] |
noreply@github.com
|
cf5801421a18d07d16150302598a4db9e9f4d683
|
a4ed795e6aa22791a3c8f6ce931cd5ce0d8ed705
|
/testproj/settings.py
|
fa9ffeeec840bbfa1314cef360baa6cc481ffd02
|
[] |
no_license
|
ali88z/dj2020
|
28fae534079e9a38f3fc143449fff059b1642374
|
e674961c1671be450a8fc28f78396ecf0fa217fc
|
refs/heads/master
| 2022-11-29T16:47:50.080800
| 2020-07-29T14:52:42
| 2020-07-29T14:52:42
| 274,812,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,430
|
py
|
"""
Django settings for testproj project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vn0by4#ck#3fj-qlm46f!kfpr61t#3wtt(b$5o=zqn9^dicb4_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#ALLOWED_HOSTS = ['192.168.20.128','192.168.74.130','192.168.1.88']
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testModel',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testproj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR+'/templates', ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries': {'mytags': 'testproj.templatetag.mytags'},
},
},
]
WSGI_APPLICATION = 'testproj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'runoob',
'HOST': '127.0.0.1',
'PORT': 3306,
'USER': 'django',
'PASSWORD': '123456',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "statics"),
]
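# Note (assumption, not in the original project): Django's mysql backend above needs a
# MySQL driver. If PyMySQL is the driver in use, it is commonly registered as MySQLdb so
# Django can import it; the guard keeps the settings importable when PyMySQL is absent.
try:
    import pymysql
    pymysql.install_as_MySQLdb()
except ImportError:
    pass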
|
[
"zjw@mails.com"
] |
zjw@mails.com
|
84bc89794412d5e88416f0917f873ba361cbb1cd
|
41f28fc3b3c7f34b879bacb2e25157b551c054bb
|
/label_studio/data_manager/functions.py
|
655b82b5cef12f9c8eed602a30de76d3a8b7085e
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dolanor-galaxy/label-studio
|
cd478cb54e4948cbb5226c02e088465cdaa12a6b
|
722358a6cdfbe5a35e7b16f586675df4b598f74f
|
refs/heads/master
| 2023-08-11T08:52:52.433731
| 2021-09-30T09:52:05
| 2021-09-30T09:52:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,278
|
py
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
from collections import OrderedDict
from django.conf import settings
from rest_framework.generics import get_object_or_404
from core.utils.common import int_from_request
from data_manager.prepare_params import PrepareParams
from data_manager.models import View
from tasks.models import Task
TASKS = 'tasks:'
logger = logging.getLogger(__name__)
class DataManagerException(Exception):
pass
def get_all_columns(project, *_):
""" Make columns info for the frontend data manager
"""
result = {'columns': []}
    # frontend uses the MST data model, so we need bidirectional referencing parent <-> child
task_data_children = []
i = 0
data_types = OrderedDict()
# add data types from config again
project_data_types = project.data_types
data_types.update(project_data_types.items())
# all data types from import data
all_data_columns = project.summary.all_data_columns
if all_data_columns:
data_types.update({key: 'Unknown' for key in all_data_columns if key not in data_types})
    # remove $undefined$ if there is at least one type in the labeling config, because it will be resolved automatically
if len(project_data_types) > 0:
data_types.pop(settings.DATA_UNDEFINED_NAME, None)
for key, data_type in list(data_types.items()): # make data types from labeling config first
column = {
'id': key,
'title': key if key != settings.DATA_UNDEFINED_NAME else 'data',
'type': data_type if data_type in ['Image', 'Audio', 'AudioPlus', 'Unknown'] else 'String',
'target': 'tasks',
'parent': 'data',
'visibility_defaults': {
'explore': True,
'labeling': key in project_data_types or key == settings.DATA_UNDEFINED_NAME
}
}
result['columns'].append(column)
task_data_children.append(column['id'])
i += 1
# --- Data root ---
data_root = {
'id': 'data',
'title': "data",
'type': "List",
'target': 'tasks',
'children': task_data_children
}
result['columns'] += [
# --- Tasks ---
{
'id': 'id',
'title': "ID",
'type': 'Number',
'help': 'Task ID',
'target': 'tasks',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'completed_at',
'title': 'Completed',
'type': 'Datetime',
'target': 'tasks',
'help': 'Last annotation date',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'total_annotations',
'title': 'Annotations',
'type': "Number",
'target': 'tasks',
'help': 'Total annotations per task',
'visibility_defaults': {
'explore': True,
'labeling': True
}
},
{
'id': 'cancelled_annotations',
'title': "Cancelled",
'type': "Number",
'target': 'tasks',
'help': 'Total cancelled (skipped) annotations',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'total_predictions',
'title': "Predictions",
'type': "Number",
'target': 'tasks',
'help': 'Total predictions per task',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'annotators',
'title': 'Annotated by',
'type': 'List',
'target': 'tasks',
'help': 'All users who completed the task',
'schema': {'items': project.organization.members.values_list('user__id', flat=True)},
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'annotations_results',
'title': "Annotation results",
'type': "String",
'target': 'tasks',
'help': 'Annotation results stacked over all annotations',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'annotations_ids',
'title': "Annotation IDs",
'type': "String",
'target': 'tasks',
'help': 'Annotation IDs stacked over all annotations',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'predictions_score',
'title': "Prediction score",
'type': "Number",
'target': 'tasks',
'help': 'Average prediction score over all task predictions',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'predictions_results',
'title': "Prediction results",
'type': "String",
'target': 'tasks',
'help': 'Prediction results stacked over all predictions',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'file_upload',
'title': "Source filename",
'type': "String",
'target': 'tasks',
'help': 'Source filename from import step',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'created_at',
'title': 'Created at',
'type': 'Datetime',
'target': 'tasks',
'help': 'Task creation time',
'visibility_defaults': {
'explore': False,
'labeling': False
}
}
]
result['columns'].append(data_root)
return result
def get_prepare_params(request, project):
# use filters and selected items from view
view_id = int_from_request(request.GET, 'view_id', 0)
if view_id > 0:
view = get_object_or_404(request, View, pk=view_id)
if view.project.pk != project.pk:
raise DataManagerException('Project and View mismatch')
prepare_params = view.get_prepare_tasks_params(add_selected_items=True)
# use filters and selected items from request if it's specified
else:
selected = request.data.get('selectedItems', {"all": True, "excluded": []})
if not isinstance(selected, dict):
raise DataManagerException('selectedItems must be dict: {"all": [true|false], '
'"excluded | included": [...task_ids...]}')
filters = request.data.get('filters', None)
ordering = request.data.get('ordering', [])
prepare_params = PrepareParams(project=project.id, selectedItems=selected, data=request.data,
filters=filters, ordering=ordering)
return prepare_params
def get_prepared_queryset(request, project):
prepare_params = get_prepare_params(request, project)
queryset = Task.prepared.only_filtered(prepare_params=prepare_params)
return queryset
def evaluate_predictions(tasks):
""" Call ML backend for prediction evaluation of the task queryset
"""
if not tasks:
return
project = tasks[0].project
for ml_backend in project.ml_backends.all():
# tasks = tasks.filter(~Q(predictions__model_version=ml_backend.model_version))
ml_backend.predict_many_tasks(tasks)
def filters_ordering_selected_items_exist(data):
return data.get('filters') or data.get('ordering') or data.get('selectedItems')
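# --- Illustrative only ---
# A hedged example of the request payload shape that get_prepare_params expects when no
# view_id is supplied; the task ids are placeholders and the ordering key simply reuses
# the "tasks:" prefix defined at the top of this module.
EXAMPLE_PREPARE_PAYLOAD = {
    'selectedItems': {'all': True, 'excluded': [101, 102]},  # or {'all': False, 'included': [...]}
    'filters': None,                                         # optional, passed through to PrepareParams
    'ordering': [TASKS + 'total_annotations'],               # optional ordering keys
}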
|
[
"noreply@github.com"
] |
noreply@github.com
|
2fbd7c9248f1dcc4aa90678c7973c0971038f7b3
|
dbeae28942f79ebe1f844628baf6cb8f7251609b
|
/modules/state.py
|
961e9b0dd1677c68fc8b876bae6fae442c30c3b4
|
[] |
no_license
|
kouheiszk/pokemon-bot
|
3226614ad699dca261f2c97523b70d3c91a08b00
|
ba7404b7f6120581ac6602ca0c00ecbd9e0cbfc1
|
refs/heads/master
| 2020-05-21T10:12:07.376595
| 2016-09-13T10:57:01
| 2016-09-13T10:57:01
| 66,206,829
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from modules.catch import Catch
from modules.entities.badges import Badges
from modules.entities.hatched_eggs import HatchedEggs
from modules.entities.inventory import Inventory
from modules.entities.map_objects import MapObjects
from modules.entities.player import Player
from modules.entities.settings import Settings
class State(object):
def __init__(self):
self.player = Player()
self.inventory = Inventory()
self.badges = Badges()
self.settings = Settings()
self.map_objects = MapObjects()
self.catch = Catch()
self.hatched_eggs = HatchedEggs(self.inventory)
|
[
"kouhei.szk@gmail.com"
] |
kouhei.szk@gmail.com
|
47b910274ca6546bd96488e2c3027896b833a188
|
7abd8bbbba8f401c4ce9d9ec550a0cae4a6f19ed
|
/bingads/v12/bulk/entities/__init__.py
|
afc5d3d8bf175347a50c466420cd874f00447f89
|
[
"MIT"
] |
permissive
|
stevenblanton/BingAds-Python-SDK
|
fd2f119db51e1a91962aa5ee4bb86344e58078a8
|
5b6e6499ae1dcc6fb8ba3032ad1a2b6ee63705c9
|
refs/heads/master
| 2020-09-05T12:11:04.168580
| 2019-11-01T15:49:08
| 2019-11-01T15:49:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
__author__ = 'Bing Ads SDK Team'
__email__ = 'bing_ads_sdk@microsoft.com'
from .common import *
from .bulk_error import *
from .bulk_entity import *
from .bid_suggestion_data import *
from .unknown_bulk_entity import *
from .bulk_account import *
from .bulk_budget import *
from .bulk_campaign import *
from .bulk_ad_group import *
from .bulk_keyword import *
from .bulk_campaign_product_scope import *
from .bulk_ad_group_product_partition import *
from .bulk_campaign_negative_dynamic_search_ad_target import *
from .bulk_ad_group_dynamic_search_ad_target import *
from .bulk_ad_group_negative_dynamic_search_ad_target import *
from .ad_extensions import *
from .bulk_ads import *
from .bulk_negative_keywords import *
from .bulk_negative_sites import *
from .audiences import *
from .target_criterions import *
from .labels import *
from .bulk_offline_conversion import *
from .bulk_experiment import *
|
[
"qitia@microsoft.com"
] |
qitia@microsoft.com
|