blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e1c2fca2bad35624293caa5c903e7e1a37fcb96d | e35eb92b5ab6547119585004b9eea3cafe948050 | /efsw/archive/errors.py | 3b9ac8626e58cb7513fc221356b582c5bec573f4 | [] | no_license | einsfr/mmkit | 0a084db85b2cf5ba268e692676095d768733f387 | f12bc2f83254a3123e02abdc105816cc04c438b5 | refs/heads/master | 2020-12-31T05:56:19.287611 | 2016-06-10T05:56:58 | 2016-06-10T05:56:58 | 29,473,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | ITEM_LINK_SELF_SELF = 'Элемент не может иметь связь с самим собой.'
# Russian user-facing error-message templates; '{0}' placeholders are filled
# via str.format() by callers.  (Literals must stay Russian — they are
# runtime strings shown to users.)
ITEM_LINK_TYPE_UNKNOWN = 'Неизвестный тип связи между элементами: {0}.'  # "Unknown link type between items: {0}."
ITEM_NOT_FOUND = 'Элемент(ы) с ID {0} не существует(ют).'  # "Item(s) with ID {0} do(es) not exist."
STORAGE_NOT_FOUND = 'Хранилище(а) с ID {0} не существует(ют).'  # "Storage(s) with ID {0} do(es) not exist."
STORAGE_NOT_ALLOWED_AS_ARCHIVE = 'Хранилище(а) с ID {0} нельзя использовать как архивные.'  # "Storage(s) with ID {0} cannot be used as archive."
CATEGORY_NOT_FOUND = 'Категория(и) с ID {0} не существует(ют).'  # "Category(ies) with ID {0} do(es) not exist."
| [
"einsfr@users.noreply.github.com"
] | einsfr@users.noreply.github.com |
b2d24a26d42b2a0c76b31118dd956f8e49e2a7bd | f205319c1a6af71ee6433a737de4796ac7c145f9 | /task2.py | 6c0b143278a1a507e89c1aaa1f67aeb362f453dd | [] | no_license | photosartd/Math-models-Lab2 | ef1199f01dbd2d1937ff8f622f31d5f7bc7b33a2 | e1dc314ba6193a65e25e6885fbd1e7bc20f5fef9 | refs/heads/main | 2023-05-03T23:05:12.473052 | 2021-04-19T09:21:33 | 2021-04-19T09:21:33 | 359,137,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | import numpy as np
round_to = 5  # number of decimals kept when rounding t_s and D
r = np.array([6,9,2])  # demand rates per product
C_1 = np.array([6,20,20])  # per-unit holding costs per product
C_s = 36992  # fixed setup (ordering) cost per cycle
T = 51  # planning-horizon length
print(r*5)  # leftover debug output: demand scaled by 5
def task2(r, C_1, C_s, T, t_s=None, if_print=True, round_to=5):
    """Solve a multi-product EOQ-style lot-scheduling problem.

    Parameters
    ----------
    r : np.ndarray
        Demand rates per product.
    C_1 : np.ndarray
        Per-unit holding costs per product.
    C_s : float
        Fixed setup (ordering) cost per cycle.
    T : float
        Planning-horizon length.
    t_s : float, optional
        Cycle length.  When None, the cost-optimal value is computed from
        the Wilson/EOQ first-order condition.
    if_print : bool
        Print the solution when True.
    round_to : int
        Decimals used when rounding t_s and D (default matches the module
        constant the original implementation read).

    Returns
    -------
    tuple
        ``(t_s, q, D)`` — cycle length, lot sizes per product, total cost.
    """
    if t_s is None:
        # Optimal cycle length: t_s* = sqrt(2*C_s / sum(C_1*r)).
        t_s = round(np.sqrt((2 * C_s) / sum(C_1 * r)), round_to)
    # q and D were previously duplicated verbatim in both branches; the
    # computation is identical, so it is hoisted out of the conditional.
    q = r * t_s  # lot size per product for the chosen cycle length
    D = round(sum(C_1 * r) * t_s * T / 2 + C_s * T / t_s, round_to)  # total cost over horizon
    if if_print:
        # The original backslash-continued f-string collapsed everything onto
        # one whitespace-padded line; print each quantity on its own line.
        print(f't_s = {t_s}\nq_i = {q}\nD = {D}')
    return (t_s, q, D)
# Scenario runs: the computed optimum, plus two fixed cycle lengths
# (roughly half and double the optimal t_s = 17) for comparison.
optimum = task2(r, C_1, C_s, T)
half_ts = task2(r,C_1, C_s,T, t_s=9)
two_ts = task2(r,C_1, C_s,T, t_s=34)
"dmitry.trofimow2011@gmail.com"
] | dmitry.trofimow2011@gmail.com |
3a83ed261321a3290a3fc74e77077b67d6eaa4d5 | 24d2a9af8e78fedf2e5f6297353015aaabeb98a1 | /hello.py | 4ee8b95099c6437bf2729d12444cd5dc09aa8c99 | [
"Apache-2.0"
] | permissive | AnastasiiaBlaha/python_workshop | d875a574da99add0c68630d6e3b844f4f8ad08f7 | 6fd0305c748a1c519fa09fcad8a5c336028c5e3e | refs/heads/master | 2020-09-19T19:46:25.112521 | 2019-11-26T20:34:12 | 2019-11-26T20:34:12 | 224,280,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# # Define a main() function that prints a little greeting.
# def main():
# # Get the name from the command line, using 'World' as a fallback.
# if len(sys.argv) >= 2:
# name = sys.argv[1] + " " + sys.argv[2]
# else:
# name = 'World'
# print 'yay!', name
# # This is the standard boilerplate that calls the main() function.
# if __name__ == '__main__':
# main()
def repeat(arg):
    """Return *arg* repeated eight times (sequence/string repetition)."""
    return 8 * arg
def main():
    """Print the first CLI argument repeated eight times; 'Tom' gets ' !!!'.

    Raises IndexError when no argument is supplied (historical behaviour,
    unchanged).  The print calls use the parenthesised single-argument form,
    which behaves identically under Python 2 and Python 3 — the original
    Python-2-only `print` statements were a syntax error under Python 3.
    """
    name = sys.argv[1]
    if name == 'Tom':
        print(repeat(name + ' ') + ' ' + '!!!')
    else:
        print(repeat(name + ' '))
# Standard script entry-point guard: run main() only when executed directly.
if __name__ == '__main__':
    main()
"anastasiia.blaha@gmail.com"
] | anastasiia.blaha@gmail.com |
1217e3e57869565f3ec42a80986e66bb1d63dbd2 | e00fe1e065b448f6f8c0472ed2b8a39991fa7b1b | /Fuzzy_clustering/version2/dataset_manager/create_datasets_pca.py | 0542341ca3bd78c1d9c10831c04312420e50e87c | [
"Apache-2.0"
] | permissive | joesider9/forecasting_library | 1a4ded5b09fc603f91fa1c075e79fc2ed06c08a8 | db07ff8f0f2693983058d49004f2fc6f8849d197 | refs/heads/master | 2023-03-29T12:18:22.261488 | 2021-04-01T08:57:08 | 2021-04-01T08:57:08 | 319,906,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,155 | py | import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from pytz import timezone
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import my_scorer
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_daily_nwps
class DatasetCreatorPCA:
def __init__(self, project, data=None, n_jobs=1, test=False, dates=None):
if test is None:
raise NotImplemented('test is none for short-term, not implemented for PCA')
self.data = data
self.is_for_test = test
self.project_name = project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = n_jobs
self.variables = self.static_data['data_variables']
self.logger = create_logger(logger_name=f"log_{self.static_data['project_group']}",
abs_path=self.path_nwp_project,
logger_path=f"log_{self.static_data['project_group']}.log", write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
# Extract dates of power measurements.
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
return pd.DatetimeIndex(dates)
    def make_dataset_res(self):
        """Build the PCA training (or evaluation) dataset for this project.

        Loads the stacked 3-D NWP arrays from pickles when both exist,
        otherwise rebuilds them via ``get_3d_dataset``; then dispatches to
        the single-farm or multi-farm dataset builder depending on whether
        ``self.areas`` is a dict of named sub-areas.
        """
        # Separate pickle names for evaluation (test) vs training runs.
        nwp_3d_pickle = 'nwps_3d_test.pickle' if self.is_for_test else 'nwps_3d.pickle'
        dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
        nwp_3d_pickle = os.path.join(self.path_data, nwp_3d_pickle)
        dataset_cnn_pickle = os.path.join(self.path_data, dataset_cnn_pickle)
        if not (os.path.exists(nwp_3d_pickle) and os.path.exists(dataset_cnn_pickle)):
            data, x_3d = self.get_3d_dataset()
        else:
            data = joblib.load(nwp_3d_pickle)
            x_3d = joblib.load(dataset_cnn_pickle)  # FIXME: unused variable
        data_path = self.path_data
        if not isinstance(self.areas, dict):
            self.dataset_for_single_farm(data, data_path)
        else:
            # Multi-farm case: scan NWP files until one date parses, to
            # recover the lat/long grids covering the configured area group;
            # these are needed to slice per-area sub-grids below.
            dates_stack = []
            for t in self.dates:
                p_dates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
                dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
                dates_stack.append(dates)
            flag = False
            for i, p_dates in enumerate(dates_stack):
                t = self.dates[i]
                file_name = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
                if os.path.exists(file_name):
                    nwps = joblib.load(file_name)
                    for date in p_dates:
                        try:
                            nwp = nwps[date]
                            # Normalise coordinate vectors to 2-D: lat as a
                            # column vector, long as a row vector.
                            if len(nwp['lat'].shape) == 1:
                                nwp['lat'] = nwp['lat'][:, np.newaxis]
                            if len(nwp['long'].shape) == 1:
                                nwp['long'] = nwp['long'][np.newaxis, :]
                            lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
                                    nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
                            longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
                                    nwp['long'][0, :] <= self.area_group[1][1])))[0]
                            lats_group = nwp['lat'][lats]
                            longs_group = nwp['long'][:, longs]
                            flag = True
                            break
                        except Exception:
                            continue
                if flag:
                    break
            self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
    def get_3d_dataset(self):
        """Stack per-day NWP grids into 3-D arrays and persist them.

        For every valid date, collects the hourly timestamps (offset +24h to
        +47h) that exist in the measurement index, stacks the corresponding
        NWP grids in parallel, and dumps the per-variable dict plus the CNN
        tensor to pickles (test or train filenames per ``self.is_for_test``).

        :return: ``(data_var, x_3d)`` — dict of stacked arrays per variable
            (plus 'dates'), and the stacked 2-D-per-sample CNN input.
        """
        dates_stack = []
        for t in self.dates:
            p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47),
                                    freq='H')  # 47 hours: 00:00 -> 23:00
            dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
            dates_stack.append(dates)  # For each date we have prediction append the next 47 hours
        area = self.area_group if isinstance(self.areas, dict) else self.areas
        # Warm-up call (result discarded); then stack all days in parallel.
        nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project,
                               self.nwp_model, area, self.variables,
                               self.compress, self.static_data['type'])
        nwp_daily = Parallel(n_jobs=self.n_jobs)(
            delayed(stack_daily_nwps)(self.dates[i], p_dates, self.path_nwp_project,
                                      self.nwp_model, area, self.variables,
                                      self.compress, self.static_data['type'])
            for i, p_dates in enumerate(dates_stack))
        # Seed every accumulator with an empty array; the primary variable
        # (WS for wind, Flux for pv) also gets previous/next-hour slots.
        x = np.array([])
        data_var = dict()
        for var in self.variables:
            if (var == 'WS' and self.static_data['type'] == 'wind') or \
                    (var == 'Flux' and self.static_data['type'] == 'pv'):
                data_var[var + '_prev'] = x
                data_var[var] = x
                data_var[var + '_next'] = x
            else:
                data_var[var] = x
        data_var['dates'] = x
        x_3d = np.array([])
        for arrays in nwp_daily:
            nwp = arrays[0]
            x_2d = arrays[1]
            if x_2d.shape[0] != 0:  # skip days with no usable NWP data
                for var in nwp.keys():
                    if var != 'dates':
                        data_var[var] = stack_3d(data_var[var], nwp[var])
                    else:
                        data_var[var] = np.hstack((data_var[var], nwp[var]))
                x_3d = stack_3d(x_3d, x_2d)
                self.logger.info('NWP data stacked for date %s', arrays[2])
        if self.is_for_test:
            joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
            joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
        else:
            joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
            joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
        self.logger.info('NWP stacked data saved')
        return data_var, x_3d
def train_pca(self, data, components, level):
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
param_grid = [{
"gamma": np.logspace(-3, 0, 20),
}]
kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.n_jobs)
grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.n_jobs)
grid_search.fit(data_scaled)
kpca = grid_search.best_estimator_
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
joblib.dump({'scaler': scaler, 'kpca': kpca}, fname)
def pca_transform(self, data, components, level):
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_pca(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
    def dataset_for_single_farm(self, data, data_path):
        """Build and save the tabular dataset for a single-farm project.

        Extracts the central grid cell plus PCA-compressed rings of
        neighbouring cells from each stacked NWP variable, joins them into
        ``dataset_X``, aligns the target power in ``dataset_y``, sorts the
        columns by absolute correlation with the target (training) or by the
        persisted column order (evaluation), and writes both to CSV.
        """
        dataset_X = pd.DataFrame()
        if self.static_data['type'] == 'pv':
            # Calendar features for PV: hour of day and month.
            hours = [dt.hour for dt in data['dates']]
            months = [dt.month for dt in data['dates']]
            dataset_X = pd.concat(
                [dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                    (var == 'Flux') and (self.static_data['type'] == 'pv')):
                # Primary variable: previous/current/next-hour centre cell
                # plus PCA-compressed cell groups from the 5x5 grid.
                X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
                X0_level0 = X0[:, 2, 2]  # centre cell (previous hour)
                X1 = np.transpose(data[var], [0, 2, 1])
                X1_level1 = X1[:, 2, 2]  # centre cell (current hour)
                # Inner-ring cells below/left of centre -> 3 PCA components.
                ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
                ind = np.array(ind)
                X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3d = self.pca_transform(X1_level3d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                # Inner-ring cells above/right of centre -> 2 PCA components.
                ind = [[2, 3], [3, 2], [3, 3]]
                ind = np.array(ind)
                X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3u = self.pca_transform(X1_level3u, 2, level)
                self.logger.info('Successfully PCA transform for %s', level)
                # Outer-ring cells (first row/column) -> 3 PCA components.
                ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
                ind = np.array(ind)
                X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4d = self.pca_transform(X1_level4d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                # Outer-ring cells (last row/column) -> 3 PCA components.
                ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
                ind = np.array(ind)
                X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4u = self.pca_transform(X1_level4u, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                X2 = np.transpose(data[var + '_next'], [0, 2, 1])
                X2_level0 = X2[:, 2, 2]  # centre cell (next hour)
                var_name = 'flux' if var == 'Flux' else 'wind'
                var_sort = 'fl' if var == 'Flux' else 'ws'
                col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
                col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
                                                                             range(2)]
                col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
                                                                              range(3)]
                X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
                               X1_level3u, X1_level4d
                               , X1_level4u))
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            elif var in {'WD', 'Cloud'}:
                # Secondary grid variables: same cell groupings, current hour only.
                X1 = np.transpose(data[var], [0, 2, 1])
                X1_level1 = X1[:, 2, 2]
                ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
                ind = np.array(ind)
                X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3d = self.pca_transform(X1_level3d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[2, 3], [3, 2], [3, 3]]
                ind = np.array(ind)
                X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3u = self.pca_transform(X1_level3u, 2, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
                ind = np.array(ind)
                X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4d = self.pca_transform(X1_level4d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
                ind = np.array(ind)
                X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4u = self.pca_transform(X1_level4u, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                var_name = 'cloud' if var == 'Cloud' else 'direction'
                var_sort = 'cl' if var == 'Cloud' else 'wd'
                col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
                                                                                    range(2)]
                col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
                                                                              range(3)]
                X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
                               , X1_level4u))
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
                # Scalar features: centre cell only.
                X2 = np.transpose(data[var], [0, 2, 1])
                X2_level0 = X2[:, 2, 2]
                var_name = 'Temp' if var == 'Temperature' else 'wind'
                var_sort = 'tp' if var == 'Temperature' else 'ws'  # NOTE(review): var_sort unused in this branch
                col = [var_name]
                X = X2_level0
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            else:
                continue
        dataset_X = dataset_X  # no-op self-assignment (left unchanged)
        dataset_y = self.data.loc[dataset_X.index].to_frame()
        dataset_y.columns = ['target']
        if self.is_for_test:
            # Evaluation: reuse the column order persisted at training time.
            ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
            columns = dataset_X.columns[ind]
            dataset_X = dataset_X[columns]
            dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
            dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
            self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
        else:
            # Training: order columns by |corr| with the target (descending)
            # and persist that order for later evaluation runs.
            corr = []
            for f in range(dataset_X.shape[1]):
                corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
            ind = np.argsort(np.array(corr))[::-1]
            columns = dataset_X.columns[ind]
            dataset_X = dataset_X[columns]
            joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
            dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
            dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
            self.logger.info('Successfully dataset created for training for %s', self.project_name)
    def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
        """Build and save the tabular dataset for a multi-area project.

        For every named sub-area, slices its lat/long window from the stacked
        NWP grids, PCA-compresses each variable slab, then adds per-variable
        columns averaged over all areas.  Finally aligns the target, orders
        columns (by persisted order for evaluation, by |corr| with the target
        for training) and writes the CSVs.
        """
        dataset_X = pd.DataFrame()
        if self.static_data['type'] == 'pv':
            # Calendar features for PV: hour of day and month.
            hours = [dt.hour for dt in data['dates']]
            months = [dt.month for dt in data['dates']]
            dataset_X = pd.concat(
                [dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
        for var in self.variables:
            for area_name, area in areas.items():
                # Two area encodings: a 2x2 corner matrix, or a flat
                # [lat_min, long_min, lat_max, long_max] list.
                if len(area) > 1:
                    lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
                    longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
                else:
                    lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
                    longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
                if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                        (var == 'Flux') and (self.static_data['type'] == 'pv')):
                    # Primary variable: prev/current/next hour, PCA to 3/9/3 components.
                    X0 = data[var + '_prev'][:, lats, :][:, :, longs]
                    X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
                    level = var + '_prev_' + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X0_compressed = self.pca_transform(X0, 3, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    X1 = data[var][:, lats, :][:, :, longs]
                    X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                    level = var + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X1_compressed = self.pca_transform(X1, 9, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    X2 = data[var + '_next'][:, lats, :][:, :, longs]
                    X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
                    level = var + '_next_' + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X2_compressed = self.pca_transform(X2, 3, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
                    var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name  # NOTE(review): unused
                    col = ['p_' + var_name + '.' + str(i) for i in range(3)]
                    col += ['n_' + var_name + '.' + str(i) for i in range(3)]
                    col += [var_name + '.' + str(i) for i in range(9)]
                    X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
                    dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
                elif var in {'WD', 'Cloud'}:
                    X1 = data[var][:, lats, :][:, :, longs]
                    X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                    level = var + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X1_compressed = self.pca_transform(X1, 9, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
                    var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name  # NOTE(review): unused
                    col = [var_name + '.' + str(i) for i in range(9)]
                    X = X1_compressed
                    dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
                elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
                    X1 = data[var][:, lats, :][:, :, longs]
                    X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                    level = var + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X1_compressed = self.pca_transform(X1, 3, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
                    var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name  # NOTE(review): unused
                    col = [var_name + '.' + str(i) for i in range(3)]
                    X = X1_compressed
                    dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
                else:
                    continue
        # Cross-area aggregate columns: mean of each variable over all areas.
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                    (var == 'Flux') and (self.static_data['type'] == 'pv')):
                col = []
                col_p = []
                col_n = []
                for area_name, area in areas.items():
                    var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
                    col += [var_name + '.' + str(i) for i in range(9)]
                    col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
                    col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
                var_name = 'flux' if var == 'Flux' else 'wind'
                dataset_X[var_name] = dataset_X[col].mean(axis=1)
                # NOTE(review): for wind projects the next two assignments
                # both target 'wind' again, overwriting the mean above —
                # 'p_wind'/'n_wind' were probably intended.  Confirm before
                # changing, as persisted column orders depend on the names.
                var_name = 'p_flux' if var == 'Flux' else 'wind'
                dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'wind'
                dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
            elif var in {'WD', 'Cloud'}:
                col = []
                for area_name, area in areas.items():
                    var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
                    col += [var_name + '.' + str(i) for i in range(9)]
                var_name = 'cloud' if var == 'Cloud' else 'direction'
                dataset_X[var_name] = dataset_X[col].mean(axis=1)
            elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
                col = []
                for area_name, area in areas.items():
                    var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
                    col += [var_name + '.' + str(i) for i in range(3)]
                var_name = 'Temp' if var == 'Temperature' else 'wind'
                dataset_X[var_name] = dataset_X[col].mean(axis=1)
        dataset_y = self.data.loc[dataset_X.index].to_frame()
        dataset_y.columns = ['target']
        if self.is_for_test:
            # Evaluation: reuse the column order persisted at training time.
            ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
            columns = dataset_X.columns[ind]
            dataset_X = dataset_X[columns]
            dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
            dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
            self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
        else:
            # Training: order columns by |corr| with the target (descending).
            corr = []
            for f in range(dataset_X.shape[1]):
                corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
            ind = np.argsort(np.array(corr))[::-1]
            columns = dataset_X.columns[ind]
            dataset_X = dataset_X[columns]
            joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
            dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
            dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
            self.logger.info('Successfully dataset created for training for %s', self.project_name)
    def make_dataset_res_offline(self, utc=False):
        """Build the dataset for offline (no measurements) operation.

        Stacks NWP data for all ``self.dates`` without filtering against a
        measurement index, then builds the single- or multi-farm feature
        frame.  Returns ``(X, X_3d)``.
        """
        # NOTE(review): this nested helper is never used in this method
        # (only get_3d_dataset_offline defines and uses its own copy).
        def datetime_exists_in_tz(dt, tz):
            try:
                dt.tz_localize(tz)
                return True
            except:
                return False
        data, X_3d = self.get_3d_dataset_offline(utc)
        if not isinstance(self.areas, dict):
            X = self.dataset_for_single_farm_offline(data)
        else:
            # Multi-farm: recover the lat/long grids covering the area group
            # from the first NWP file/date that parses.
            dates_stack = []
            for t in self.dates:
                pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
                dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
                dates_stack.append(dates)
            flag = False
            for i, pdates in enumerate(dates_stack):
                t = self.dates[i]
                fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
                if os.path.exists(fname):
                    nwps = joblib.load(fname)
                    for date in pdates:
                        try:
                            nwp = nwps[date]
                            # Normalise coordinate vectors to 2-D.
                            if len(nwp['lat'].shape) == 1:
                                nwp['lat'] = nwp['lat'][:, np.newaxis]
                            if len(nwp['long'].shape) == 1:
                                nwp['long'] = nwp['long'][np.newaxis, :]
                            lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
                                    nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
                            longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
                                    nwp['long'][0, :] <= self.area_group[1][1])))[0]
                            lats_group = nwp['lat'][lats]
                            longs_group = nwp['long'][:, longs]
                            flag = True
                            break
                        except:  # NOTE(review): bare except — hides all errors
                            continue
                if flag:
                    break
            X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
        return X, X_3d
    def get_3d_dataset_offline(self, utc):
        """Stack per-day NWP grids for offline runs (no measurement filter).

        When ``utc`` is False, the hourly range is interpreted as local
        'Europe/Athens' time and converted to UTC, skipping timestamps that
        do not exist locally (DST spring-forward gap).

        :return: ``(data_var, X_3d)`` as in ``get_3d_dataset``.
        """
        def datetime_exists_in_tz(dt, tz):
            # True when dt exists in tz (filters DST-nonexistent local times).
            try:
                dt.tz_localize(tz)
                return True
            except:
                return False
        dates_stack = []
        for dt in self.dates:
            if utc:
                pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
                dates = [t.strftime('%d%m%y%H%M') for t in pdates]
                dates_stack.append(dates)
            else:
                pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
                indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
                pdates = pdates[indices]
                pdates = pdates.tz_localize(timezone('Europe/Athens'))
                pdates = pdates.tz_convert(timezone('UTC'))
                dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
                dates_stack.append(dates)
        if not isinstance(self.areas, dict):
            nwp_daily = Parallel(n_jobs=self.n_jobs)(
                delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
                                          self.areas, self.variables, self.compress, self.static_data['type'])
                for i, pdates in enumerate(dates_stack))
        else:
            # Warm-up call (result discarded); then stack all days in parallel.
            nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model,
                                   self.area_group,
                                   self.variables, self.compress, self.static_data['type'])
            nwp_daily = Parallel(n_jobs=self.n_jobs)(
                delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
                                          self.area_group, self.variables, self.compress, self.static_data['type'])
                for i, pdates in enumerate(dates_stack))
        # Seed accumulators; the primary variable also gets prev/next slots.
        X = np.array([])
        data_var = dict()
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                    (var == 'Flux') and (self.static_data['type'] == 'pv')):
                data_var[var + '_prev'] = X
                data_var[var] = X
                data_var[var + '_next'] = X
            else:
                data_var[var] = X
        data_var['dates'] = X
        X_3d = np.array([])
        for arrays in nwp_daily:
            nwp = arrays[0]
            x_2d = arrays[1]
            if x_2d.shape[0] != 0:  # skip days with no usable NWP data
                for var in nwp.keys():
                    if var != 'dates':
                        data_var[var] = stack_3d(data_var[var], nwp[var])
                    else:
                        data_var[var] = np.hstack((data_var[var], nwp[var]))
                X_3d = stack_3d(X_3d, x_2d)
                self.logger.info('NWP data stacked for date %s', arrays[2])
        return data_var, X_3d
    def dataset_for_single_farm_offline(self, data):
        """Build the single-farm feature frame for offline prediction.

        Mirrors ``dataset_for_single_farm`` (centre cell + PCA-compressed
        cell groups per variable) but performs no target alignment and no
        CSV writing: it reorders columns by the persisted training order and
        returns the DataFrame.
        """
        dataset_X = pd.DataFrame()
        if self.static_data['type'] == 'pv':
            # Calendar features for PV: hour of day and month.
            hours = [dt.hour for dt in data['dates']]
            months = [dt.month for dt in data['dates']]
            dataset_X = pd.concat(
                [dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                    (var == 'Flux') and (self.static_data['type'] == 'pv')):
                # Primary variable: prev/current/next centre cell plus
                # PCA-compressed cell groups of the 5x5 grid.
                X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
                X0_level0 = X0[:, 2, 2]
                X1 = np.transpose(data[var], [0, 2, 1])
                X1_level1 = X1[:, 2, 2]
                ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
                ind = np.array(ind)
                X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3d = self.pca_transform(X1_level3d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[2, 3], [3, 2], [3, 3]]
                ind = np.array(ind)
                X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3u = self.pca_transform(X1_level3u, 2, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
                ind = np.array(ind)
                X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4d = self.pca_transform(X1_level4d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
                ind = np.array(ind)
                X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4u = self.pca_transform(X1_level4u, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                X2 = np.transpose(data[var + '_next'], [0, 2, 1])
                X2_level0 = X2[:, 2, 2]
                var_name = 'flux' if var == 'Flux' else 'wind'
                var_sort = 'fl' if var == 'Flux' else 'ws'
                col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
                col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
                                                                             range(2)]
                col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
                                                                              range(3)]
                X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
                               X1_level3u, X1_level4d
                               , X1_level4u))
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            elif var in {'WD', 'Cloud'}:
                # Secondary grid variables: same cell groupings, current hour only.
                X1 = np.transpose(data[var], [0, 2, 1])
                X1_level1 = X1[:, 2, 2]
                ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
                ind = np.array(ind)
                X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3d = self.pca_transform(X1_level3d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[2, 3], [3, 2], [3, 3]]
                ind = np.array(ind)
                X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_mid_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level3u = self.pca_transform(X1_level3u, 2, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
                ind = np.array(ind)
                X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_down'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4d = self.pca_transform(X1_level4d, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
                ind = np.array(ind)
                X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
                level = var + '_curr_out_up'
                self.logger.info('Begin PCA training for %s', level)
                X1_level4u = self.pca_transform(X1_level4u, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                var_name = 'cloud' if var == 'Cloud' else 'direction'
                var_sort = 'cl' if var == 'Cloud' else 'wd'
                col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
                                                                                    range(2)]
                col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
                                                                              range(3)]
                X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
                               , X1_level4u))
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
                # Scalar features: centre cell only.
                X2 = np.transpose(data[var], [0, 2, 1])
                X2_level0 = X2[:, 2, 2]
                var_name = 'Temp' if var == 'Temperature' else 'wind'
                var_sort = 'tp' if var == 'Temperature' else 'ws'  # NOTE(review): var_sort unused in this branch
                col = [var_name]
                X = X2_level0
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            else:
                continue
        # Apply the column order persisted during training.
        ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
        columns = dataset_X.columns[ind]
        dataset_X = dataset_X[columns]
        return dataset_X
    def dataset_for_multiple_farms_offline(self, data, areas, lats_group, longs_group):
        """Build the offline (training) feature frame for a group of farms.

        For every NWP variable and every configured area, the grid cells
        inside the area's bounds are selected, compressed via
        ``self.pca_transform`` and appended as columns. A second pass adds
        group-wide mean features across all areas. Columns are finally
        reordered with the saved ``dataset_columns_order.pickle`` index, so
        the produced column names must not change without regenerating that
        pickle.

        Args:
            data: dict of NWP arrays keyed by variable name (plus 'dates');
                the main variable also has '<var>_prev'/'<var>_next' entries.
            areas: dict mapping an area name to its lat/long bounds.
            lats_group: 2-D latitude grid of the whole area group.
            longs_group: 2-D longitude grid of the whole area group.

        Returns:
            pandas.DataFrame of features indexed by ``data['dates']``.
        """
        dataset_X = pd.DataFrame()
        # For PV projects, expose hour and month as explicit calendar features.
        if self.static_data['type'] == 'pv':
            hours = [dt.hour for dt in data['dates']]
            months = [dt.month for dt in data['dates']]
            dataset_X = pd.concat(
                [dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
        for var in self.variables:
            for area_name, area in areas.items():
                # Pick the grid-cell indices that fall inside this area's
                # bounding box.
                # NOTE(review): the `len(area) > 1` test presumably separates a
                # 2x2 corner array from a flat [lat0, long0, lat1, long1]
                # sequence, but len() of a flat 4-element sequence is also > 1
                # — confirm the upstream `areas` format.
                if len(area) > 1:
                    lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
                    longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
                else:
                    lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
                    longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
                if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                        (var == 'Flux') and (self.static_data['type'] == 'pv')):
                    # Main production variable: previous, current and next hour
                    # grids are each flattened and PCA-compressed (3/9/3
                    # components respectively).
                    X0 = data[var + '_prev'][:, lats, :][:, :, longs]
                    X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
                    level = var + '_prev_' + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X0_compressed = self.pca_transform(X0, 3, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    X1 = data[var][:, lats, :][:, :, longs]
                    X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                    level = var + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X1_compressed = self.pca_transform(X1, 9, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    X2 = data[var + '_next'][:, lats, :][:, :, longs]
                    X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
                    level = var + '_next_' + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X2_compressed = self.pca_transform(X2, 3, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
                    var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
                    col = ['p_' + var_name + '.' + str(i) for i in range(3)]
                    col += ['n_' + var_name + '.' + str(i) for i in range(3)]
                    col += [var_name + '.' + str(i) for i in range(9)]
                    X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
                    dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
                elif var in {'WD', 'Cloud'}:
                    # Secondary variables: only the current hour, 9 components.
                    X1 = data[var][:, lats, :][:, :, longs]
                    X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                    level = var + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X1_compressed = self.pca_transform(X1, 9, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
                    var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
                    col = [var_name + '.' + str(i) for i in range(9)]
                    X = X1_compressed
                    dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
                elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
                    # Tertiary variables: current hour only, 3 components.
                    X1 = data[var][:, lats, :][:, :, longs]
                    X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                    level = var + area_name
                    self.logger.info('Begin PCA training for %s', level)
                    X1_compressed = self.pca_transform(X1, 3, level)
                    self.logger.info('Successfully PCA transform for %s', level)
                    var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
                    var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
                    col = [var_name + '.' + str(i) for i in range(3)]
                    X = X1_compressed
                    dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
                else:
                    continue
        # Second pass: group-wide mean features averaged across all areas.
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                    (var == 'Flux') and (self.static_data['type'] == 'pv')):
                col = []
                col_p = []
                col_n = []
                for area_name, area in areas.items():
                    var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
                    col += [var_name + '.' + str(i) for i in range(9)]
                    col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
                    col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
                var_name = 'flux' if var == 'Flux' else 'wind'
                dataset_X[var_name] = dataset_X[col].mean(axis=1)
                # NOTE(review): for wind projects both assignments below reuse
                # the name 'wind' and overwrite the mean just computed above —
                # 'p_wind'/'n_wind' look intended. Left unchanged because
                # dataset_columns_order.pickle was recorded against the
                # current column names; fixing this requires regenerating it.
                var_name = 'p_flux' if var == 'Flux' else 'wind'
                dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'wind'
                dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
            elif var in {'WD', 'Cloud'}:
                col = []
                for area_name, area in areas.items():
                    var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
                    col += [var_name + '.' + str(i) for i in range(9)]
                var_name = 'cloud' if var == 'Cloud' else 'direction'
                dataset_X[var_name] = dataset_X[col].mean(axis=1)
            elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
                col = []
                for area_name, area in areas.items():
                    var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
                    col += [var_name + '.' + str(i) for i in range(3)]
                var_name = 'Temp' if var == 'Temperature' else 'wind'
                dataset_X[var_name] = dataset_X[col].mean(axis=1)
        # Reorder columns to the exact order recorded at dataset-creation time.
        ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
        columns = dataset_X.columns[ind]
        dataset_X = dataset_X[columns]
        return dataset_X
def make_dataset_res_online(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_online(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_online(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_online(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_online(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
dates_stack = []
if utc:
pdates = pd.date_range(self.dates[-1] + pd.DateOffset(hours=25), self.dates[-1] + pd.DateOffset(hours=48),
freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
dates_stack.append(dates)
else:
pdates = pd.date_range(self.dates[-1] + pd.DateOffset(hours=25), self.dates[-1] + pd.DateOffset(hours=48),
freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
arrays = stack_daily_nwps(self.dates[-1], dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas,
self.variables, self.compress, self.static_data['type'])
else:
arrays = stack_daily_nwps(self.dates[-1], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_online(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_X = dataset_X
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
self.logger.info('Successfully dataset created for training for %s', self.project_name)
return dataset_X
def dataset_for_multiple_farms_online(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
self.logger.info('Successfully dataset created for training for %s', self.project_name)
return dataset_X
| [
"joesider9@gmail.com"
] | joesider9@gmail.com |
b90f4ea8a1ded40bd96a10a5aa67dcb65cf04e94 | 163142df55ed85f75a7d22bbe37be42a15361d38 | /testGitGraph.py | cb9ac6d33ba940287fb0ff4f1689995709ffd7c6 | [] | no_license | Iskander508/JIRA-graph | c43a85f9e4b5c8e376f428dee6b16d2e3df0aaa5 | 25ca18d825839f4bd59964a214fd4474532ed0f5 | refs/heads/master | 2020-04-16T23:24:03.961562 | 2016-08-19T11:54:19 | 2016-08-19T12:35:24 | 45,353,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Ad-hoc smoke script: build a GitGraph over a local repository and print it.
import git
import gitGraph
# NOTE(review): hard-coded local Windows path — this only runs on the author's
# machine; point repositoryPath at your own clone to reproduce.
g = gitGraph.GitGraph(git.GIT(repositoryPath='D:\\otevrenebrno\\otevrenebrno'))
#result = g.checkout(git.Branch('master'))
#result = g.merge(git.Branch('master'))
# Add branch names and individual commit hashes to the graph (presumably as
# nodes — confirm against GitGraph.add).
g.add('master')
g.add('origin/document_parser')
g.add('origin/angular-leaflet-directive')
g.add('fbf15af421249789af73a760da0a9a44041861d2')
g.add('10e2e79be307dac9f1628240f9e4bd3b1402e1c2')
g.add('d1be5303e7cec10fed619c159427120129d16842')
g.add('ef7d45802a4c22e3a43f5c1125c5a48994953123')
# Output relies on GitGraph's string representation.
print(g)
| [
"pavel.zarecky@seznam.cz"
] | pavel.zarecky@seznam.cz |
bb9e7d7199760f8ffe314a0cf5f8be5ac8ae6737 | 449d2c420958b8f0fa7ad6df656885c17b976090 | /Python Complete course/Simple python/list and tupple.py | c0e0efd69187d28b1b8ad92466f8b38983c5266f | [] | no_license | rehanali4790/Python-Complete-Course | d577f10091d41e1454d2f9521c4107a86e194760 | 526a529f236e03940026816f33e7b0a0971ecaeb | refs/heads/main | 2023-03-27T05:46:43.537739 | 2021-03-23T12:30:06 | 2021-03-23T12:30:06 | 350,698,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | grocerry = ["potato","tomato","ketchup","roll","noodles"]
# Practice script for list operations; most experiments are intentionally
# kept commented out for reference.
# `grocerry[::]` is a full-slice copy of the grocery list defined above.
print(grocerry[::])
#grocerry.sort()
# Sample integers for the commented-out list-method experiments below.
numbers_ = [1,3,89,2,90,67,82]
#numbers_.sort()
#numbers_.reverse()
#print(min(numbers_))
#print(max(numbers_))
#numbers_.append(93)
#numbers_.append(54)
#numbers_.insert(1,2)
#numbers_.insert(0,"numbers")
#numbers_.remove(90)
#numbers_.pop()
#numbers_[0] = "numbers"
#print(numbers_)
# NOTE: {1,2,3} below is a set literal, not a tuple.
#tp = {1,2,3}
#tp.pop()
#print(tp) | [
"noreply@github.com"
] | noreply@github.com |
bf261138ee9f64a8877b605647e8315e62e71feb | 09ce79c538f6cc66a51e9fe0e032a0cb9b24f222 | /test/test_echo_udp.py | 4996930ce743f4acdfa63b485d75f83fad717629 | [] | no_license | dlaperriere/misc_utils | d33007adccbbd08a06ab463e5a39b193d8faac00 | 3e04fdfe0f692d644e43f55a8362f0456c79ee88 | refs/heads/main | 2021-08-02T09:23:28.288183 | 2021-07-31T19:05:49 | 2021-07-31T19:05:49 | 60,727,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | #!/usr/bin/env python
"""
Description
Test echo_udp.py script
Note
- works with python 2.7 and 3.6
Author
David Laperriere <dlaperriere@outlook.com>
"""
from __future__ import print_function
import os
import sys
import unittest
# Make the current working directory and its parent importable so that
# `lib.cmd` (and, if re-enabled, `echo_udp`) can be resolved.
sys.path.append(os.path.abspath(""))
sys.path.append(os.path.abspath("../"))
from lib import cmd
#import echo_udp
__version_info__ = (1, 0)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "David Laperriere dlaperriere@outlook.com"
# Path of the script under test, resolved against the current working
# directory (not this file's directory).
script = "echo_udp.py"
script_path = os.path.join(os.path.abspath(""), script)
class TestEchoUDP(unittest.TestCase):
    """Unit tests for echo_udp.py"""

    def test_python2(self):
        """echo_udp.py -v should exit with status 0 under Python 2."""
        _, status = cmd.run("python2 {} -v".format(script_path))
        self.assertEqual(status, 0)

    def test_python3(self):
        """echo_udp.py -v should exit with status 0 under Python 3."""
        _, status = cmd.run("python3 {} -v".format(script_path))
        self.assertEqual(status, 0)
if __name__ == "__main__":
    unittest.main()
    # NOTE(review): unreachable — unittest.main() calls sys.exit() itself.
    exit(0)
| [
"dlaperriere@hotmail.com"
] | dlaperriere@hotmail.com |
0f41b4c555162561f877240887369c044b1fe898 | 3d589d1c56b55fbd2b45b03564b8a9442ebf142b | /lib/src/klio/metrics/base.py | 1b50aeb1da57930cc8fba17042c72434460c2eb4 | [
"Apache-2.0"
] | permissive | spotify/klio | 1aff27412e92c9d699259e5ab1eaeb39dc3e9571 | e625565708ed846201d2e05f782c0ce585554346 | refs/heads/develop | 2023-05-25T14:33:28.348335 | 2022-03-23T20:34:09 | 2022-03-23T20:34:09 | 285,928,366 | 815 | 57 | Apache-2.0 | 2023-05-24T21:07:09 | 2020-08-07T22:02:58 | Python | UTF-8 | Python | false | false | 5,765 | py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base classes from which a metrics consumer (i.e. ffwd, logger, etc.)
will need to implement.
New consumers are required to implement the :class:`AbstractRelayClient`, and
three metrics objects based off of :class:`BaseMetric`: a counter, a gauge, and
a timer.
"""
import abc
import six
class _DummyAttribute(object):
# for the ability to do `FOO_ATTR = abstract_attr()` as well as
# decorate a property method
pass
def abstract_attr(obj=None):
    """Mark an attribute or a property method as abstract.

    Works both for class-level attributes and for methods wrapped with
    ``@property``:

    .. code-block:: python

        class Foo(object):
            my_foo_attribute = abstract_attr()

            @property
            @abstract_attr
            def my_foo_property(self):
                pass

    Args:
        obj (callable): Python object to "decorate", e.g. a class method.
            When omitted (or falsy), a dummy placeholder object is created
            purely to carry the ``__isabstractattr__`` marker (mirroring
            ``__isabstractmethod__`` from ``abc.abstractmethod``).
    Returns the object with ``__isabstractattr__`` set to ``True``.
    """
    target = obj if obj else _DummyAttribute()
    target.__isabstractattr__ = True
    return target
def _has_abstract_attributes_implemented(cls, name, bases):
"""Verify a given class has its abstract attributes implemented."""
for base in bases:
abstract_attrs = getattr(base, "_klio_metrics_abstract_attributes", [])
class_attrs = getattr(cls, "_klio_metrics_all_attributes", [])
for attr in abstract_attrs:
if attr not in class_attrs:
err_str = (
"Error instantiating class '{0}'. Implementation of "
"abstract attribute '{1}' from base class '{2}' is "
"required.".format(name, attr, base.__name__)
)
raise NotImplementedError(err_str)
def _get_all_attributes(clsdict):
    """Return the names of every non-callable entry in ``clsdict``."""
    attrs = []
    for attr_name, attr_value in six.iteritems(clsdict):
        if not callable(attr_value):
            attrs.append(attr_name)
    return attrs
def _get_abstract_attributes(clsdict):
    """Return the names of non-callable entries flagged ``__isabstractattr__``."""
    attrs = []
    for attr_name, attr_value in six.iteritems(clsdict):
        if callable(attr_value):
            continue
        if getattr(attr_value, "__isabstractattr__", False):
            attrs.append(attr_name)
    return attrs
class _ABCBaseMeta(abc.ABCMeta):
    """Enforce behavior upon implementations of ABC classes.

    At class-creation time, ``__new__`` records which declared attributes are
    abstract, and ``__init__`` verifies that subclasses implement every
    abstract attribute inherited from their bases.
    """

    def __init__(cls, name, bases, clsdict):
        _has_abstract_attributes_implemented(cls, name, bases)

    def __new__(mcs, name, bases, clsdict):
        clsdict["_klio_metrics_abstract_attributes"] = _get_abstract_attributes(
            clsdict
        )
        clsdict["_klio_metrics_all_attributes"] = _get_all_attributes(clsdict)
        return super(_ABCBaseMeta, mcs).__new__(mcs, name, bases, clsdict)
class AbstractRelayClient(six.with_metaclass(_ABCBaseMeta)):
    """Interface every metrics-consumer relay client must implement.

    Each new consumer (i.e. ffwd, logging-based metrics) subclasses this and
    supplies the attribute and methods below.

    Attributes:
        RELAY_CLIENT_NAME (str): must match the key in ``klio-job.yaml``
            under ``job_config.metrics``.
    """

    RELAY_CLIENT_NAME = abstract_attr()

    def __init__(self, klio_config):
        self.klio_config = klio_config

    @abc.abstractmethod
    def unmarshal(self, metric):
        """Return a dictionary-representation of the ``metric`` object."""
        pass

    @abc.abstractmethod
    def emit(self, metric):
        """Emit the given metric object to the particular consumer.

        ``emit`` runs in a threadpool separate from the transform; errors
        raised here are logged and then ignored.
        """
        pass

    @abc.abstractmethod
    def counter(self, name, value=0, transform=None, **kwargs):
        """Return a new counter-type metric for this consumer.

        Callers store the returned counter objects in memory as a simple
        cache.
        """
        pass

    @abc.abstractmethod
    def gauge(self, name, value=0, transform=None, **kwargs):
        """Return a new gauge-type metric for this consumer.

        Callers store the returned gauge objects in memory as a simple cache.
        """
        pass

    @abc.abstractmethod
    def timer(self, name, transform=None, **kwargs):
        """Return a new timer-type metric for this consumer.

        Callers store the returned timer objects in memory as a simple cache.
        """
        pass
class BaseMetric(object):
    """Base class for all metric types.

    A consumer must provide a counter, a gauge, and a timer metric, each
    built on this class.
    """

    def __init__(self, name, value=0, transform=None, **kwargs):
        """Store the metric's name, initial value, and optional transform."""
        self.name = name
        self.value = value
        self.transform = transform

    def update(self, value):
        """Replace the metric's current value."""
        self.value = value
| [
"lynn@spotify.com"
] | lynn@spotify.com |
9084c5e743b26571e62ba65a4df2d3ec5e68700c | a3972cb6ba32abd18b374975f4abd5318bc95f09 | /project/src/yosigy/api/yosigy_list_views.py | 960d32f8f54604c94ee00262c81979094695a2d5 | [] | no_license | ssr03/MiniDelivery | c57bb45e497cab34787473925663ace46dbb6b2d | 659d9757d1f369a6713aa5a66bab2aa5d6381b8e | refs/heads/master | 2020-07-30T15:05:01.401229 | 2019-09-23T11:52:51 | 2019-09-23T11:52:51 | 210,267,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,099 | py | import enum
from datetime import datetime
from django.core.paginator import Paginator
from django.db.models import F, Count
from django.http import JsonResponse
from django.views.generic.base import View
from accounts.mixins import LoginRequiredMixin
from restaurant.api.views import CategoryNum
from yosigy.models import Yosigy
class YosigyListInfo(enum.IntEnum):
    """Pagination constants for the yosigy list view."""

    # Number of posts rendered on a single page.
    POST_TO_SHOW_IN_ONE_PAGE = 4
    # Number of page links shown in the pagination widget.
    PAGES_TO_SHOW = 3
class YosigyListAPIView(LoginRequiredMixin, View):
    """JSON API returning the paginated list of active yosigy (group-buy)
    deals, optionally filtered by restaurant category and sorted by tab.
    """

    def get(self, request, *args, **kwargs):
        """Return active yosigy deals as JSON.

        URL kwargs: ``category_id`` (0/ALL for every category) and ``page``.
        Query param ``tab_value``: '' or 'all' for default order,
        'deadline' for soonest-deadline-first.
        """
        category_id = kwargs['category_id']
        today = datetime.now().date()
        tab_value = request.GET.get('tab_value', '')
        json_data = {}
        # NOTE(review): if 'page' is missing/0, self.page is never set and
        # yosigy_paginator below raises AttributeError — presumably the URL
        # conf guarantees a positive page number; confirm.
        if kwargs['page']:
            self.page = kwargs['page']
        # Deals that are still open (deadline today or later), annotated with
        # the number of yosigy menus per restaurant.
        if not category_id or category_id == CategoryNum.ALL_ID:
            yosigy = (
                Yosigy.objects
                .select_related('restaurant')
                .prefetch_related('yosigymenu_set')
                .filter(
                    restaurant__is_yosigy=True,
                    deadline__gte=today,
                )
                .values(
                    'restaurant',
                )
                .annotate(
                    is_yosigy_count=Count('yosigymenu__menu'),
                )
                .values(
                    'pk',
                    'is_yosigy_count',
                    restaurant_title=F('restaurant__title'),
                    restaurant_img=F('restaurant__img'),
                    yosigy_deadline=F('deadline'),
                    yosigy_notice=F('notice'),
                )
                .order_by('-created_time')
            )
        else:
            # Same query, restricted to the requested restaurant category.
            yosigy = (
                Yosigy.objects
                .select_related('restaurant')
                .prefetch_related('yosigymenu_set')
                .filter(
                    restaurant__is_yosigy=True,
                    deadline__gte=today,
                    restaurant__category__pk=category_id,
                )
                .values(
                    'restaurant',
                )
                .annotate(
                    is_yosigy_count=Count('yosigymenu__menu'),
                )
                .values(
                    'pk',
                    'is_yosigy_count',
                    restaurant_title=F('restaurant__title'),
                    restaurant_img=F('restaurant__img'),
                    yosigy_deadline=F('deadline'),
                    yosigy_notice=F('notice'),
                )
                .order_by('-created_time')
            )
        # Per-yosigy count of set menus, merged into the main rows below.
        yosigy_set = (
            Yosigy.objects
            .select_related('restaurant')
            .prefetch_related('yosigymenu_set')
            .filter(yosigymenu__menu__is_set_menu=True,)
            .annotate(
                is_set_menu_count=Count('yosigymenu__menu'),
            )
            .values(
                'is_set_menu_count',
                'pk',
            )
        )
        # Attach is_set_menu_count to the matching yosigy row by pk.
        # NOTE(review): O(n*m) nested scan — fine for small result sets.
        for i in yosigy:
            for j in yosigy_set:
                if i['pk'] == j['pk']:
                    i['is_set_menu_count'] = j['is_set_menu_count']
        yosigy=list(yosigy)
        if not yosigy:
            json_data = {
                'message': '아직 공동 구매할 수 있는 메뉴가 없습니다.',
            }
        elif tab_value == 'deadline':
            # Soonest deadline first.
            yosigy=sorted(yosigy, key=lambda menu:menu['yosigy_deadline'])
            json_data = self.yosigy_paginator(yosigy)
            json_data['deadline'] = True
        elif tab_value == 'all' or tab_value == '':
            json_data = self.yosigy_paginator(yosigy)
            json_data['all'] = True
        return JsonResponse(
            json_data
        )

    def yosigy_paginator(self, yosigy):
        """Paginate ``yosigy`` using ``self.page`` and return a dict with the
        current page's rows, previous/next flags and the visible page-link
        window (``page_range`` as an inclusive [start, end] pair).
        """
        paginator = Paginator(yosigy, YosigyListInfo.POST_TO_SHOW_IN_ONE_PAGE)
        current_page = paginator.get_page(self.page)
        # First page number of the PAGES_TO_SHOW-wide window containing
        # self.page (windows start at 1, 4, 7, ... for PAGES_TO_SHOW == 3).
        start = (self.page-1) // YosigyListInfo.PAGES_TO_SHOW * YosigyListInfo.PAGES_TO_SHOW + 1
        end = start + YosigyListInfo.PAGES_TO_SHOW
        last_page = len(paginator.page_range)
        # Clamp the window to the actual number of pages.
        if last_page < end:
            end = last_page
        yosigy_list = current_page.object_list
        page_range = range(start, end + 1)
        yosigy_list_data = {
            'yosigy_list': yosigy_list,
            'current_page': {
                'has_previous': current_page.has_previous(),
                'has_next': current_page.has_next(),
            },
            'page_range': [page_range[0], page_range[-1]],
        }
        # Only expose neighbor page numbers when they exist.
        if current_page.has_previous():
            yosigy_list_data['current_page']['previous_page_number'] = current_page.previous_page_number()
        if current_page.has_next():
            yosigy_list_data['current_page']['next_page_number'] = current_page.next_page_number()
        return yosigy_list_data
| [
"43363127+ssr03@users.noreply.github.com"
] | 43363127+ssr03@users.noreply.github.com |
a15f411bdebb64d8ba1c4f8098e1687a6eb1a2a8 | 7f9627073fb7b122155bca377d564af2bc431c53 | /access_to_aws/test.py | 15d1e0be0d64ff640db726535323ce373876269b | [] | no_license | Kevjolly/DataScienceProject | 3e52d60bbc42a355d956092a510057c1d10f9d76 | c80aa2ab3765ff6f9dbe3370246348f2e91c0563 | refs/heads/master | 2020-04-29T17:07:16.410394 | 2019-05-10T15:52:17 | 2019-05-10T15:52:17 | 176,287,613 | 0 | 1 | null | 2019-05-10T15:52:18 | 2019-03-18T13:05:48 | Python | UTF-8 | Python | false | false | 145 | py | from accessAPIdatabase import *
# Smoke-test script: exercises the accessAPIdatabase helpers (brought into
# scope above via `from accessAPIdatabase import *`).
# Fetch the base data set -- args look like (dataset name, table/split); TODO confirm.
results = getBaseData("mallat", "MALLAT_TEST")
#print(results)
# Fetch newly added data -- argument meaning unclear from here (flag, id?);
# verify against accessAPIdatabase.
results = getNewData(False, 27)
#print(results)
| [
"kevjolly78@gmail.com"
] | kevjolly78@gmail.com |
2b07e11c085a9b0619fef61a5169fb372e93c5c9 | 8f867bdc00e7e13a926cb0a404d1713384de36ab | /김성수/프로그래머스 LV1/같은 숫자는 싫어.py | 8b2ad254c13d8e9d1f7525d42619eb4e955627bd | [] | no_license | kssgit/Meerithm | 1501c276aaf0fa8c3c9b9edd4c43abe7d24ebc8e | fa979aba09d301fe1731c0df857b461e67482533 | refs/heads/main | 2023-08-26T22:06:47.748519 | 2021-11-08T15:49:37 | 2021-11-08T15:49:37 | 378,855,521 | 0 | 2 | null | 2021-11-08T15:49:37 | 2021-06-21T08:11:31 | Java | UTF-8 | Python | false | false | 250 | py | def solution(arr):
    # Keep the first element, then keep each later element only when it
    # differs from the most recently kept value -- i.e. collapse runs of
    # consecutive duplicates (Programmers "same consecutive numbers" task).
    answer = []
    num = arr[0]
    answer.append(num)
    for i in range(1,len(arr)):
        if arr[i] != num:
            answer.append(arr[i])
            num = arr[i]
    return answer
# Manual check: [1,1,3,3,0,1,1] should collapse to [1, 3, 0, 1].
arr = [1,1,3,3,0,1,1]
print(solution(arr))
"76714467+kssgit@users.noreply.github.com"
] | 76714467+kssgit@users.noreply.github.com |
961eb495d649c9820ad86fc9b038685ac0e9f0e5 | 3ad18431b18c4847e6324155209ba5b940923dd2 | /noiseProtocol/clientNoise.py | acf6386d93d73fe43fdbc3521b7ce6d036734d89 | [] | no_license | stratumv2/stratumv2 | a3aedc31474ff228d113c797c38a505ea8ceb6a1 | 3e3c730e6d56e73b55dbf1b3647dbe5ef15afd8f | refs/heads/master | 2020-11-24T00:35:22.889702 | 2020-01-23T15:21:03 | 2020-01-23T15:21:03 | 227,884,446 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | import socket
from noise.connection import NoiseConnection
# Demo Noise-protocol *initiator*: performs a Noise_NN handshake with a
# responder listening on localhost:2000, then exchanges one encrypted
# message each way. (`socket` is imported earlier in the file.)
sock = socket.socket()
sock.connect(('localhost', 2000))
# Create instance of NoiseConnection, set up to use NN handshake pattern, Curve25519 for
# elliptic curve keypair, ChaCha20Poly1305 as cipher function and SHA256 for hashing.
proto = NoiseConnection.from_name(b'Noise_NN_25519_ChaChaPoly_SHA256')
# Set role in this connection as initiator
proto.set_as_initiator()
# Enter handshake mode
proto.start_handshake()
# Perform handshake - as we are the initiator, we need to generate first message.
# We don't provide any payload (although we could, but it would be cleartext for this pattern).
message = proto.write_message()
# Send the message to the responder - you may simply use sockets or any other way
# to exchange bytes between communicating parties.
sock.sendall(message)
# Receive the message from the responder
received = sock.recv(2048)
# Feed the received message into noise
payload = proto.read_message(received)
# As of now, the handshake should be finished (as we are using NN pattern).
# Any further calls to write_message or read_message would raise NoiseHandshakeError exception.
# We can use encrypt/decrypt methods of NoiseConnection now for encryption and decryption of messages.
encrypted_message = proto.encrypt(b'This is an example payload')
sock.sendall(encrypted_message)
# Read back the responder's encrypted reply and print the decrypted bytes.
ciphertext = sock.recv(2048)
plaintext = proto.decrypt(ciphertext)
print(plaintext)
| [
"sthiagolg@gmail.com"
] | sthiagolg@gmail.com |
0488cfa80dd088af4130571c9478a61247675251 | bbfb2ab6d23629eba2dd47a007fbfbbe7b5a97b6 | /mask_detection_on_face.py | d60c52dcb5704d2ad78b67ccd588755fc5c9f88e | [] | no_license | nkrjain5/Face_mask_detection | 20a9a6edf8848b52db31ecb9432fd92e0d217647 | 0792b6a4d92730f5875e05a6e474f9e0d7b2f08b | refs/heads/master | 2022-11-22T03:43:21.649375 | 2020-07-22T13:08:27 | 2020-07-22T13:08:27 | 280,350,461 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,875 | py | import cv2
from keras.models import load_model
import numpy as np
import os
# --- Configuration: absolute Windows paths to trained artifacts ----------
# Candidate Keras mask-classifier checkpoints (different runs/epoch counts);
# only model_12 is actually loaded in main() below.
model_01=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\face_mask_model_001.h5'
model_02=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\face_mask_model_002_50Epochs.h5'
model_03=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\face_mask_model_003_100Epochs.h5'
model_04=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_001_100Epochs.h5'
model_05=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_002_100Epochs.h5'
model_06=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_003_50Epochs.h5'
model_07=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_004_100Epochs.h5'
model_08=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_005_newDS_100Epochs.h5'
model_09=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_006_newDS_100Epochs.h5'
model_10=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_007_newDS_50Epochs.h5'
model_11=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_008_newDS_50Epochs.h5'
model_12=r'J:\Udemy\DL\face_mask_detection_pyimagesearch\Face_mask_detector_nkr\Model\Trained_mode\face_mask_model_009_newDS_50Epochs.h5'
# OpenCV-DNN Caffe face detector: weights + network definition (prototxt).
caffe_mode_path=r'J:\Udemy\DL\Face_detection_caffee\Model\res10_300x300_ssd_iter_140000.caffemodel'
caffe_model_proto=r'J:\Udemy\DL\Face_detection_caffee\Model\deploy.prototxt.txt'
# Output path for the annotated webcam recording.
video_out=r'J:\RT_mask_temp_monitor\RT_mask_detection.avi'
def main():
    """Run real-time mask detection on webcam frames.

    Per frame: detect faces with the Caffe SSD face detector, crop each
    detection (padded by ~7.5%), classify the crop with the Keras mask
    model, draw a labelled box, and append the annotated frame to an XVID
    video file. Press ESC (key code 27) to stop.
    """
    caffemodel=cv2.dnn.readNetFromCaffe(caffe_model_proto,caffe_mode_path)
    # Only the model_12 checkpoint is used.
    detect_face=load_model(model_12)
    cap=cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(video_out, fourcc, 10.0, (640, 480))
    # Loop until ESC (27) is pressed.
    while(cv2.waitKey(1))!=27:
        ret,img=cap.read()
        if not ret: break
        img=cv2.flip(img,1)  # mirror for a selfie-style view
        (h, w) = img.shape[:2]
        print(h,w)
        # Mean-subtracted 300x300 blob, as expected by the SSD face model.
        blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
        # NOTE(review): setInput() returns None, so this is forward(None),
        # which OpenCV treats as "run to the default output layer".
        predictions=caffemodel.forward(caffemodel.setInput(blob))
        for i in range(0,15):#predictions.shape[2]): #limiting to 10 detections only
            # Keep detections with confidence > 0.75.
            if predictions[0,0,i,2] > 0.75:
                # Box coords are normalized [0,1]; scale back to pixels.
                box = predictions[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                print(i,(startX+int(startX), startY, endX, endY))
                # Pad the face box by ~7.5% of its top-left coordinates.
                T=startX-int(startX*0.075)
                L=startY-int(startY*0.075)
                B=endY+int(startY*0.075)
                R=endX+int(startX*0.075)
                cropped=img[L:B,T:R]
                cropped=cv2.resize(cropped,(224,224))
                cv2.imshow('cropped',cropped)
                # Shape/scale the crop for the classifier: (1, 224, 224, 3) in [0, 1].
                cropped=cropped.reshape(1,224,224,3)
                cropped=cropped.astype('float32')
                cropped/=255
                # res=str(detect_face.predict_classes(resized,1,verbose=0)[0][0])
                res=detect_face.predict(cropped)[0][0]
                print(res)
                # NOTE(review): res is a float, so the str() comparisons can
                # never match -- effectively only the 0.75 threshold decides
                # (and res == 0.75 exactly draws nothing).
                if res==str(1) or res==str(1.0) or res>0.75:
                    pred="MASK ON!"
                    cv2.putText(img, str(pred), (T,L-15) , cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0,255,0), 2)
                    cv2.rectangle(img,(T, L),(R,B),(0,255,0),2)
                elif res==str(0) or res==str(0.) or res < 0.75:
                    pred="MASK OFF!"
                    cv2.putText(img, str(pred), (T,L-15) , cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0,0,255), 2)
                    cv2.rectangle(img,(T, L),(R,B),(0,0,255),2)
                # cv2.putText(img, str(res), (T+30,L-15) , cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0,0,255), 2)
        cv2.imshow('live',img)
        out.write(img)
    cv2.destroyAllWindows()
    cap.release()
if __name__=='__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
09511399f74ac3c8cf5b85cbba3f50c397a45a36 | 3c6595ed12131dfbf1e15ec0e7cebfb19d418761 | /tests/models/torch/handler_financialNet_NoReference.py | 901572a8b507531f099f1d436faa244a04afe5de | [
"MIT"
] | permissive | RedisAI/aibench | c4d3e742824df85ec9e839b8b0e94c5668aa18c0 | bf0ac872c0fa25754ecacd1322ff9d2f2146fb53 | refs/heads/master | 2023-07-24T03:22:59.587178 | 2023-07-13T07:18:20 | 2023-07-13T07:18:20 | 201,507,583 | 18 | 3 | MIT | 2023-09-07T15:05:06 | 2019-08-09T16:48:13 | Go | UTF-8 | Python | false | false | 3,377 | py | # custom service file
# model_handler.py
# https://pytorch.org/serve/custom_service.html
# https://pytorch.org/serve/logging.html
# https://pytorch.org/serve/server.html
# torch-model-archiver --model-name financialNet_NoReferenceTorch --version 1 --serialized-file torchFraudNetNoRef.pt --handler handler_financialNet_NoReference.py
# torchserve --start --model-store . --models financial=financialNet_NoReferenceTorch.mar --ts-config config.properties --log-config log4j.properties
"""
ModelHandler defines a base model handler.
"""
import io
import logging
import numpy as np
import os
import torch
import json
import array
logger = logging.getLogger(__name__)
class ModelHandler(object):
    """
    TorchServe custom handler for the TorchScript model
    "torchFraudNetNoRef.pt". Implements the standard handler hooks:
    initialize / preprocess / inference / postprocess.
    """
    def __init__(self):
        self.error = None
        self._context = None
        self.model=None
        self._batch_size = 0
        self.device = None
        self.initialized = False
    def initialize(self, context):
        """
        Initialize model. This will be called during model loading time
        :param context: Initial context contains model server system properties.
        :return:
        """
        self._context = context
        properties = context.system_properties
        self._batch_size = properties["batch_size"]
        # NOTE(review): self.device is computed here but never used below;
        # torch.jit.load restores the model to its saved device.
        self.device = torch.device("cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available() else "cpu")
        model_dir = properties.get("model_dir")
        # Read model serialize/pt file
        model_pt_path = os.path.join(model_dir, "torchFraudNetNoRef.pt")
        self.model = model = torch.jit.load(model_pt_path)
        self.initialized = True
    def preprocess(self, batch):
        """
        Transform raw input into model input data.
        :param batch: list of raw requests, should match batch size
        :return: list of preprocessed model input data
        """
        # Take the input data and pre-process it make it inference ready
        # assert self._batch_size == len(batch), "Invalid input batch size: {}".format(len(batch))
        return batch
    def inference(self, model_input):
        # Only the first request of the batch is examined; its body must
        # contain a "transaction" list of 30 floats.
        response = {'outputs': None }
        # NOTE(review): the string below is a misplaced docstring (not the
        # first statement of the function), so it is a no-op expression.
        """
        Internal inference methods, checks if the input data has the correct format
        :param model_input: transformed model input data
        :return: list of inference output
        """
        if 'body' in model_input[0]:
            body = model_input[0]['body']
            if 'transaction' in body:
                transaction_data = np.array(body['transaction'], dtype=np.float32).reshape(1, 30)
                torch_tensor1 = torch.from_numpy(transaction_data)
                with torch.no_grad():
                    out = self.model(torch_tensor1).numpy().tolist()
                # First (and only) row of the model output.
                response['outputs']=out[0]
        return response
    def postprocess(self, inference_output):
        # Take output from network and post-process to desired format
        # TorchServe expects one response element per request in the batch.
        return [inference_output]
    def handle(self, data, context):
        """Entry point chaining preprocess -> inference -> postprocess."""
        model_input = self.preprocess(data)
        model_out = self.inference(model_input)
        return self.postprocess(model_out)
# Singleton handler instance shared across requests.
_service = ModelHandler()
def handle(data, context):
    # Module-level entry point required by TorchServe: lazily initialize
    # the singleton on first call, short-circuit empty requests, delegate.
    if not _service.initialized:
        _service.initialize(context)
    if data is None:
        return None
return _service.handle(data, context) | [
"noreply@github.com"
] | noreply@github.com |
3a155f5c1b2e5c5d7c96cfb5ddf83343e7bc6ea4 | 621843ab71f3cb889fd8086d6fa999fab815006b | /Gueess while.py | 125cfb09744e5a8b1c69b257bfa861ea1491bc90 | [] | no_license | Mats91/Vaja2 | bee7a92d87e817ffa0039befee407b6aa498cc71 | fcccbc8cc5b264c67d0100b0dd5aaabe5a895459 | refs/heads/master | 2020-03-12T16:45:18.809919 | 2018-04-23T16:08:47 | 2018-04-23T16:08:47 | 130,723,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | secret = 22
ponovi = True
while ponovi:
guess = int(raw_input("Guess the secret number (between 1 and 30): "))
if guess == secret:
print "You guessed it - congratulations! It's number %s :)" % secret
# ponovi = False
break
else:
print "Sorry, your guess is not correct... Secret number is not {0}".format(guess)
nadaljuj = raw_input("Ponovi?(DA/NE)")
if nadaljuj.lower().strip() != 'da':
ponovi = False
print "Ne bom ponovil"
print "End" | [
"grcar.matjaz@gmail.com"
] | grcar.matjaz@gmail.com |
e27dc99b272d488e3fcae2b5bc6d125b61ef2b29 | f41ed81206a072dc3a620446acca69b52e077702 | /run/runner/contentgen_runner.py | 6e17cdf859b8f471787ae6f827049ea8de52d348 | [
"MIT"
] | permissive | dngu7/honeycode | 0f56630d9ae5a472a87ade78f33bd072dbbd2a19 | d7a00408f8370f3fea8ca1a94b239f3e4d842a7d | refs/heads/master | 2023-03-23T14:37:14.916877 | 2021-03-16T07:17:46 | 2021-03-16T07:17:46 | 277,994,349 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,408 | py | import importlib.util
import logging
import os
import time
import random
import string
import pickle
import networkx as nx
import numpy as np
import torch
from tqdm import tqdm
from utils.read_config import read_config
from utils.train_helper import load_model
logger = logging.getLogger('gen')
class ContentgenRunner(object):
    """Generates file content with a trained character-level model.

    Loads a model architecture from the Python file named in the model's
    config.yaml, restores its checkpoint, and samples text one character
    at a time conditioned on a file extension.
    """
    def __init__(self, config, model_config_branch, model_name='contentgen'):
        logger.debug("{} initialized".format(__name__))
        self.model_name = model_name
        self.config = config
        self.use_gpu = config.use_gpu
        self.gpus = config.gpus
        self.device = config.device
        self.seed = config.seed
        # Dedicated RNG so sampling of start characters is reproducible.
        self.random_gen = random.Random(self.seed)
        self.model_dir = os.path.join(self.config.models_dir, model_name)
        #config
        self.model_config_path = os.path.join(self.model_dir, 'config.yaml')
        assert os.path.exists(self.model_config_path), "Invalid config file: {}".format(self.model_config_path)
        self.model_config = read_config(self.model_config_path)
        self.batch_size = 1
        self.temperature = model_config_branch.temperature
        self.max_gen_len = model_config_branch.max_gen_len
        self.save_sample = model_config_branch.save_sample
        self.file_exts = self.model_config.dataset.file_ext
        self.seq_len = self.model_config.dataset.seq_len
        self.end_token = self.model_config.dataset.end_token
        # Vocabulary: all printable ASCII plus the dataset's end token.
        self.all_letters = list(string.printable) + [self.end_token]
        self.n_letters = len(self.all_letters) + 1 #EOS MARKER
        #snapshot
        self.model_snapshot = os.path.join(self.model_dir, model_config_branch.model_snapshot)
        assert os.path.exists(self.model_snapshot), "Invalid snapshot: {}".format(self.model_snapshot)
        #architecture
        self.model_file = model_config_branch.model_file
        self.model_arch = os.path.join(self.model_dir, self.model_file)
        assert os.path.exists(self.model_arch), "Invalid arch: {}".format(self.model_arch)
        #initialize module and model
        # Import the architecture file dynamically and instantiate the class
        # whose name is given by the config (model.model_name).
        model_object = self.model_config.model.model_name
        spec = importlib.util.spec_from_file_location(
            model_object, self.model_arch
        )
        model_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(model_module)
        init_method = getattr(model_module, model_object)
        self.model_func = init_method(self.model_config, self.n_letters-1, self.seq_len)
        #load checkpoints
        load_model(self.model_func, self.model_snapshot, self.device)
    def tochar(self, tensor_idx):
        """Map a sequence of index tensors back to vocabulary characters."""
        all_char = []
        for t in tensor_idx:
            t = t.squeeze().detach().item()
            all_char.append(self.all_letters[t])
        return all_char
    def eval(self, ext, start_string=None, name='contentgen'):
        """Sample up to max_gen_len characters for file extension *ext*.

        Returns the generated text (including *start_string*) as UTF-8
        bytes; optionally writes it to a sample file when save_sample is set.
        NOTE(review): tensors are moved with .to(0), i.e. device index 0 is
        assumed regardless of self.device.
        """
        eval_time = time.time()
        self.model_func.to(self.device)
        self.model_func.eval()
        #start string is a random choice (except EOS)
        if start_string == None:
            # all_letters[10:62] spans the 52 ASCII letters of string.printable.
            start_string = self.random_gen.choice(self.all_letters[10:62] + ['#', ' '])
        text_generated = []
        ext_eval = torch.LongTensor([self.file_exts.index(ext)])
        ext_eval = ext_eval.pin_memory().to(0, non_blocking=True)
        input_eval = torch.LongTensor([self.all_letters.index(s) for s in start_string]).view(1, -1)
        input_eval = input_eval.pin_memory().to(0,non_blocking=True)
        hidden = self.model_func.initHidden().pin_memory().to(0,non_blocking=True)
        with torch.no_grad():
            for i in range(self.max_gen_len):
                pred, hidden = self.model_func(ext_eval, input_eval, hidden)
                pred = pred[0].squeeze()
                # Temperature scaling before sampling from the distribution.
                pred = pred / self.temperature
                m = torch.distributions.Categorical(logits=pred)
                pred_id = m.sample()
                # On the first step a multi-char start string yields one
                # prediction per input position; keep only the last one.
                if i == 0 and len(start_string) > 1:
                    pred_id = pred_id[-1]
                #print(i, pred_id)
                next_char = self.all_letters[pred_id.item()]
                text_generated.append(next_char)
                input_eval = pred_id.view(-1,1)
        full_string = start_string + ''.join(text_generated)
        full_string = full_string.encode('utf-8')
        if self.save_sample:
            save_name = os.path.join(self.config.config_save_dir, 'sample_{}.{}'.format(time.time(), ext))
            with open(save_name, 'wb') as f:
                f.write(full_string)
        logger.debug("Generated content for {} [{:2.2f} s]".format(name, time.time() - eval_time))
        #logger.debug("Generated {:2} filenames ({:.2f} s)".format(req_nodes, time.time() - eval_time))
        return full_string
| [
"5498902-dngu7@users.noreply.gitlab.com"
] | 5498902-dngu7@users.noreply.gitlab.com |
8f67823a6192eab002fdc019bde38a401808622d | 0d5ed56bb75a3f57944dd0e06fef2cc8fde8d1c8 | /facial_landmarks_normalized_visualization_more_images.py | 971331903254710fd09d2dd1013621cbc529b7d8 | [
"MIT"
] | permissive | matejcrnac/Face_deidentification_kazemi | 378cae1310648c5e2329f0af2c03e68a7f09a6b9 | 434fff49b5a7b555bb3c56a62915fd502e0b75f9 | refs/heads/master | 2020-03-08T19:59:46.373323 | 2018-06-18T17:51:42 | 2018-06-18T17:51:42 | 128,370,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,361 | py | #input one image
#detect facial landmarks
#show image with detected facial landmarks
#show image with normalized facial landmarks
import cv2
import numpy as np
from FacialLandmarkDetection import *
from Database_loader import *
#Method is used to get paths for template images
def getTemplatePaths(templates_folder, extension):
    """Recursively collect paths of files whose names end with *extension*.

    Args:
        templates_folder: root directory to walk.
        extension: filename suffix to match (e.g. "ppm").

    Returns:
        Sorted list of full file paths found under *templates_folder*.
    """
    # Comprehension over os.walk replaces the manual append loop; sorted()
    # also makes the result independent of os.walk's traversal order.
    return sorted(
        os.path.join(dirpath, fname)
        for dirpath, _dirnames, filenames in os.walk(templates_folder)
        for fname in filenames
        if fname.endswith(extension)
    )
def unnormalized_facial_landmarks_detect(imagePath):
    """Detect normalized facial landmarks in the image at *imagePath* and
    render them as dots on a blank (black) float canvas.

    The canvas is one third of the source image's size; each normalized
    landmark coordinate is scaled and offset by max(image dims)/8 before
    being drawn as a filled 1-px circle of value 1.0.
    """
    detector = FacialLandmarkDetector(imagePath)
    src_shape = detector.getImage().shape
    # Normalized landmark coordinates (no drawing on the source image).
    landmarks = detector.detectFacialLandmarks(draw=False, normalize=True)

    canvas = np.zeros((int(src_shape[0] / 3), int(src_shape[1] / 3)), dtype=np.float64)
    scale = np.max(src_shape) / 8
    for pt in landmarks:
        px = (pt[0] * scale + scale).astype(np.int32)
        py = (pt[1] * scale + scale).astype(np.int32)
        cv2.circle(canvas, (px, py), 1, (1, 1, 1), -1)
    return canvas
# Shows an image inside a window.
def showImage_more(img, text, gray=False):
    """Display *img* in a window named "window_<text>", drawing *text* in
    the top-left corner; optionally convert to grayscale first.
    """
    if gray == True:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.putText(img, text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1, cv2.LINE_AA)
    cv2.imshow("window_" + text, img)
    # Caller is expected to pump the GUI event loop (cv2.waitKey).
if __name__ == "__main__":
    # Root folder holding the template images, plus a few sample image
    # paths from the XM2VTS-style data sets.
    templates_database = "/home/matej/Diplomski/baze/Templates/baza_templates"
    imagePath_same1 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/000/000_1_1.ppm"
    imagePath_same2 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/003/003_1_1.ppm"
    imagePath_same3 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/004/004_1_1.ppm"
    imagePath_same4 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/041/041_1_1.ppm"
    imagePath_same5 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/134/134_1_1.ppm"
    image_path_man_no_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/man_no_glasses/143_1_1.ppm"
    image_path_man_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/man_glasses/113_1_1.ppm"
    image_path_woman_no_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/woman_no_glasses/154_1_1.ppm"
    image_path_woman_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/woman_glasses/250_1_1.ppm"
    # NOTE(review): imagePath is never used below -- the loop visualizes
    # the first ten *template* images instead.
    imagePath = image_path_man_glasses #chose image to use!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    template_paths = getTemplatePaths(templates_database, extension="ppm")
    k_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    # Draw the normalized landmarks of templates 1..10, one window each.
    for k in k_list:
        image_orig_black_white_norm = unnormalized_facial_landmarks_detect(imagePath = template_paths[k-1])
        showImage_more(img=image_orig_black_white_norm, text=str(k) + "- image", gray=False)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| [
"matej.crnac@gmail.com"
] | matej.crnac@gmail.com |
f1f9eb86896567cf61cf6a0519daf39c6107a071 | 67214cd9c88977ab92e63acd772c08782885ff3d | /201-250/209.py | 2e851c9f98ced9d7d37a285997658cef4b8b040d | [] | no_license | AaronJny/leetcode | cdf6ea3605aef6f97d5a45524fdc75a8ed1e1500 | 989b6ae678f9aa92a7400f6c67bbfedf31465315 | refs/heads/master | 2020-04-27T20:54:51.826033 | 2019-11-06T06:40:50 | 2019-11-06T06:40:50 | 174,675,641 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | class Solution:
def minSubArrayLen(self, s: int, nums: list) -> int:
min_len = 0
if not nums:
return 0
length = len(nums)
l = 0
_sum = 0
for i in range(length):
# 如果小于,就加进去
r = i + 1
_sum += nums[i]
for j in range(l, r):
if _sum - nums[j] >= s:
l += 1
_sum -= nums[j]
else:
break
if _sum >= s and (min_len == 0 or min_len > r - l):
min_len = r - l
return min_len | [
"Aaron__7@163.com"
] | Aaron__7@163.com |
c13eb660a22f3919107137447a037101a0066dbd | 7e56f3f287352b6fd2d3a153202299f6c86b226a | /models/build_lr_model_tfidf_unigrams.py | 79d54443b02a1e599dfa5850ab6acc56f361c02e | [] | no_license | aschams/ImprovingYelp | f99884213a6338c0a707af5a6114dc24805fdc06 | 3f750d1c7cb824948368b712a53f5075e2b7c4d7 | refs/heads/master | 2020-07-27T00:15:10.018948 | 2019-09-16T20:26:48 | 2019-09-16T20:26:48 | 208,806,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, recall_score, precision_score, average_precision_score, accuracy_score
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from itertools import compress
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import xgboost as xgb
from scipy import sparse
import pickle
# Load the full review data set and work on a reproducible 10% sample.
data = pd.read_csv("/users/aschams/scratch/Complete_reviews.csv")
data_sample = data.sample(frac = 0.1, random_state = 4919)
data_text = data_sample['text'].copy()
# Labels: shift star ratings 1..5 down to classes 0..4.
stars = data_sample['stars'].copy()
stars = stars - 1
# Shift the 'compound' score (presumably the VADER sentiment compound in
# [-1, 1]; TODO confirm) up by one so it is non-negative.
data_sample['compound'] = data_sample['compound'] + 1
# Drop identifiers, raw text, and other columns not used as features.
data_sample.drop(['Unnamed: 0', 'Unnamed: 0.1', 'business_id', "date", 'text',
                  'review_id', 'stars', 'user_id', 'name', 'Unnamed: 0_y',
                  'postal_code', "BestNights_business_id", "Music", 'latitude',
                  'longitude'],
                 axis = 1,
                 inplace = True)
data_sample.rename({'stars.1': 'avg_stars'}, axis=1, inplace =True)
# NOTE(review): np.nan_to_num turns the DataFrame into a plain ndarray.
data_sample = np.nan_to_num(data_sample)
data_sparse = sparse.csr_matrix(data_sample.astype(float))
# TF-IDF over unigrams of alphabetic tokens (>= 2 letters), ignoring terms
# appearing in more than 70% or fewer than 0.1% of reviews.
vectorizer = TfidfVectorizer(stop_words = "english",
                             max_df = 0.7,
                             min_df = .001,
                             ngram_range = (1,1),
                             token_pattern = '[A-Za-z][A-Za-z]+')
tfidf = vectorizer.fit_transform(data_text)
# Final design matrix: numeric features side by side with TF-IDF features.
full_sparse_matrix = sparse.hstack([data_sparse, tfidf])
print("Length of Vocabulary: " + str(len(vectorizer.get_feature_names())))
X_train, X_test, y_train, y_test = train_test_split(full_sparse_matrix, stars, test_size = 0.4, random_state = 70)
# Multinomial logistic regression over the combined features.
LR_clf = LogisticRegression(C=2, class_weight=None, dual=False, fit_intercept=True,
                    intercept_scaling=1, max_iter=10000, multi_class='multinomial',
                    n_jobs=None, penalty='l2', random_state=50, solver='newton-cg',
                    tol=0.005, verbose=0, warm_start=False)
LR_clf.fit(X_train, y_train)
LR_preds = LR_clf.predict(X_test)
print("Logistic Regression Performance: ")
print( "Accuracy: "+ str(accuracy_score(LR_preds, y_test)))
| [
"acschams@gmail.com"
] | acschams@gmail.com |
c35d5a098aa830d49c8bd31d6edc966e800e7473 | 98c70027eaa8881149ad700bc75bb9df75be3473 | /node_modules/scrypt/build/config.gypi | f0fab75f9945a3b8c7ef98c27865669a9a00d20b | [] | no_license | Fatima-yo/genevieve-bountypayment | 4f6406cad22f85bc6e123d13f5af7974950b1b49 | 5a5815628a79dc587d6c5678e3e02025d0036dc8 | refs/heads/master | 2020-03-16T03:31:53.321206 | 2018-05-07T16:49:36 | 2018-05-07T16:49:36 | 132,489,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,092 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 59,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.59",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/jaime/.node-gyp/9.4.0",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/5.6.0 node/v9.4.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/jaime/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/usr/local",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/jaime/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"prefer_offline": "",
"color": "true",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"auth_type": "legacy",
"node_version": "9.4.0",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/jaime/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"marcocastiglionem@gmail.com"
] | marcocastiglionem@gmail.com |
ac6e83fc490c1b1534068dd263f2e814cae0be01 | 9f667f39977ff32a358b2c8c4aaba4d6bb6af280 | /Lista2/Questão 7.py | d17fc5502f25d231077411f7b60ecdfe3f2d536c | [] | no_license | lucasdovale/ListasCES-22 | af2159b2b87f7567784427d391d549d008445547 | abd695b53449d459169025124c4df66c251f6745 | refs/heads/main | 2023-01-07T07:08:57.163455 | 2020-11-14T22:23:19 | 2020-11-14T22:23:19 | 312,895,224 | 0 | 0 | null | 2020-11-14T22:23:21 | 2020-11-14T20:20:05 | null | UTF-8 | Python | false | false | 589 | py | # CES-22: Programação Orientada à Objeto
# Professor: Yano
# Código utilizado para Questão 7 da Lista 2 de Python
# Escrito por Lucas do Vale Bezerra, COMP-22
class Point:
    """A 2-D point supporting reflection about the x-axis, slope from the
    origin, and the (slope, intercept) of the line through another point."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def reflect_x(self):
        """Negate y in place and return the reflected (x, y) pair."""
        self.y = -self.y
        return self.x, self.y

    def slope_from_origin(self):
        """Slope of the segment from (0, 0) to this point (y / x)."""
        return self.y / self.x

    def get_line_to(self, pt):
        """Return (slope, intercept) of the line through self and *pt*."""
        rise = pt.y - self.y
        run = pt.x - self.x
        slope = rise / run
        intercept = self.y - slope * self.x
        return slope, intercept
# Demo: the line through (4, 11) and (6, 15) has slope 2.0, intercept 3.0.
p = Point(4, 11)
q = Point(6, 15)
print(p.get_line_to(q)) | [
"lucasdovale.t22@gmail.com"
] | lucasdovale.t22@gmail.com |
b51e6caa09f683cec6c8f09fb1aca60e73ec36f0 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_25/models/resource_performance_no_id_by_array_get_response.py | dc59892d20f80e34fb26c25e0f59584a263ca562 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,240 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class ResourcePerformanceNoIdByArrayGetResponse(object):
    """
    Swagger-generated response model for per-array resource performance.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute name -> declared swagger type; drives to_dict() serialization.
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ResourcePerformanceNoIdByArray]',
        'total': 'list[ResourcePerformanceNoIdByArray]'
    }

    # Attribute name -> JSON key in the API definition (identical here).
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items',
        'total': 'total'
    }

    # No constructor arguments are required for this response model.
    required_args = {
    }

    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ResourcePerformanceNoIdByArray]
        total=None,  # type: List[models.ResourcePerformanceNoIdByArray]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[ResourcePerformanceNoIdByArray]): Performance data, broken down by array. If `total_only=true`, the `items` list will be empty.
            total (list[ResourcePerformanceNoIdByArray]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
        """
        # Only set attributes that were explicitly provided; __setattr__
        # below rejects any key that is not in attribute_map.
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
        if total is not None:
            self.total = total

    def __setattr__(self, key, value):
        # Restrict attribute assignment to the declared API fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset fields are represented as Property placeholders elsewhere in
        # the SDK; surface them as missing attributes instead of leaking the
        # placeholder object.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # Dict-style read access, limited to declared fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access, limited to declared fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        # Dict-style deletion, limited to declared fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        # Expose the declared field names (supports dict(model) style use).
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize list elements that are models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize dict values that are models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ResourcePerformanceNoIdByArrayGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourcePerformanceNoIdByArrayGetResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | noreply@github.com |
c55373049933fb9294b6719e84950c730ff15db0 | e3c505f0c0029460c29166d5bcb5843a0a3dceaa | /supportportal/apps/loggers/models.py | 3cdf171e7a7ccd3ff84924126cec57c39c615a1a | [] | no_license | bensnyde/django-csa | 4db742abd95dec6780a39531cef268a4da5e662f | 17a341332f6908c75ce060f55a145a76c9db48f7 | refs/heads/master | 2021-01-01T19:07:33.040642 | 2014-10-22T21:43:46 | 2014-10-22T21:43:46 | 22,216,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,843 | py | from django.conf import settings
from django.db import models
from datetime import datetime
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
import json
from django.dispatch import receiver
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.utils.timesince import timesince
def get_client_ip(request):
    """Return the client's IP address for *request*.

    Prefers the first hop of the X-Forwarded-For header (set by proxies);
    falls back to REMOTE_ADDR.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
class ActionLogger(models.Model):
    """Audit-log entry: "<actor> <verb> <obj> [on <parent>]"."""
    actor = models.ForeignKey(settings.AUTH_USER_MODEL)
    verb = models.CharField(max_length=64)
    obj = models.CharField(max_length=256, blank=False, null=False)
    # NOTE(review): `parent` is not nullable, but log() passes parent=None by
    # default — confirm this does not break inserts on the target database.
    parent = models.CharField(max_length=256)
    # datetime.now is passed as a callable, so it is evaluated per save.
    timestamp = models.DateTimeField(default=datetime.now)

    class Meta:
        # Newest entries first.
        ordering = ('-timestamp', )

    def log(self, actor, verb, obj, parent=None):
        """Create and persist a new action-log row."""
        ActionLogger.objects.create(actor=actor, verb=verb, obj=obj, parent=parent)

    def dump_to_dict(self):
        """Serialize this entry for JSON API responses."""
        action = "%s %s" % (self.verb, self.obj)
        if self.parent:
            action = "%s on %s" % (action, self.parent)
        return {
            'actor_id': self.actor.pk,
            'actor': self.actor.get_full_name(),
            'timestamp': self.timesince(),
            'action': action
        }

    def __unicode__(self):
        # Python 2 style string representation.
        response = "%s %s %s" % (self.actor, self.verb, self.obj)
        if self.parent:
            response = response + " on %s" % self.parent
        return response

    def timesince(self, now=None):
        """Human-readable age of this entry (e.g. "4 minutes")."""
        return timesince(self.timestamp, now)
class RequestLogger(models.Model):
    """Abstract base model capturing the interesting parts of an HTTP request."""
    # datetime.now is passed as a callable, so it is evaluated per save.
    timestamp = models.DateTimeField(default=datetime.now)
    uri = models.URLField(max_length=256)
    ip = models.IPAddressField()
    user_agent = models.CharField(max_length=256)
    request_method = models.CharField(max_length=16)
    get = models.CharField(max_length=256)
    post = models.CharField(max_length=256)
    cookies = models.CharField(max_length=256)

    class Meta:
        abstract = True

    def __unicode__(self):
        return "%s request to %s @ %s" % (self.ip, self.uri, self.timestamp)

    def log(self, request):
        """Persist a snapshot of *request* via the concrete subclass's manager.

        BUG FIX: the original body called ``Request.objects.create`` where
        ``Request`` is an undefined name, so log() raised NameError whenever
        invoked.  This model is abstract, so log() must be called from a
        concrete subclass; ``type(self)`` resolves to that subclass.
        """
        type(self).objects.create(
            uri=request.build_absolute_uri(),
            ip=get_client_ip(request),
            user_agent=request.META['HTTP_USER_AGENT'],
            request_method=request.META['REQUEST_METHOD'],
            post=json.dumps(request.POST),
            get=json.dumps(request.GET),
            cookies=json.dumps(request.COOKIES)
        )

    def timesince(self, now=None):
        """Human-readable age of this record (e.g. "4 minutes")."""
        return timesince(self.timestamp, now)
class AuthenticationLogger(RequestLogger):
    """Concrete request log recording login/logout events for a user."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False)
    # "Login" or "Logout" (see the signal receivers below).
    category = models.CharField(max_length=16, null=False, blank=False)

    def dump_to_dict(self):
        """Serialize this entry for JSON API responses."""
        return {
            'user': self.user.get_full_name(),
            'category': self.category,
            'ip': self.ip,
            'user_agent': self.user_agent,
            'timestamp': self.timesince()
        }

    def log(self, request, user, category):
        """Persist an authentication event.

        BUG FIX: the original ignored the explicit ``user`` argument and
        recorded ``request.user`` instead; use the user handed to us by the
        auth signals, which is authoritative for both login and logout.
        """
        AuthenticationLogger.objects.create(
            user=user,
            category=category,
            ip=get_client_ip(request),
            user_agent=request.META['HTTP_USER_AGENT'],
        )

    def __unicode__(self):
        return "%s %s from %s @ %s" % (self.user, self.category, self.ip, self.timestamp)
@receiver(user_logged_in)
def log_login(sender, request, user, **kwargs):
    """Record a "Login" authentication event when Django signals a login."""
    AuthenticationLogger().log(request, user, "Login")
@receiver(user_logged_out)
def log_logout(sender, request, user, **kwargs):
    """Record a "Logout" authentication event when Django signals a logout."""
    AuthenticationLogger().log(request, user, "Logout")
| [
"introspectr3@gmail.com"
] | introspectr3@gmail.com |
2682ec078d2d665c54515022a6840ddf88168001 | 7a1f6f1aae43b219cd34c3c9b907923fb839e6f5 | /Python/Udemy/FXTRADE/pyfxtrading/pyfxtrading/28/app/controllers/webserver.py | bbf2ff35ce8221762754b16b7b6dd096ee8484a4 | [] | no_license | amanoman/amanoman.github.io | b5afc80e0e49ed15db793e2ebf69003c05ab8ce0 | 141c928f6d1df0389859f663f6439d327d4c32d6 | refs/heads/master | 2023-05-28T07:22:09.735409 | 2021-03-31T15:00:14 | 2021-03-31T15:00:14 | 187,139,297 | 0 | 1 | null | 2023-05-22T23:37:24 | 2019-05-17T03:19:36 | Jupyter Notebook | UTF-8 | Python | false | false | 543 | py | from flask import Flask
from flask import render_template
import settings
# Flask application; Jinja templates live in the sibling ``views`` directory.
app = Flask(__name__, template_folder='../views')
@app.teardown_appcontext
def remove_session(ex=None):
    """Discard the scoped SQLAlchemy session when the app context tears down."""
    # Imported locally to avoid a circular import at module load time —
    # TODO confirm that is the reason.
    from app.models.base import Session
    Session.remove()
@app.route('/')
def index():
    """Render the landing page (google.html template) with a sample word."""
    app.logger.info('index')
    return render_template('./google.html',
                           word='World')
def start():
    """Run the Flask dev server on the configured port.

    Binds 0.0.0.0 (all interfaces) so the server is reachable from outside
    the host, e.g. inside a container.
    """
    # app.run(host='127.0.0.1', port=settings.web_port, threaded=True)
    app.run(host='0.0.0.0', port=settings.web_port, threaded=True)
| [
"amntkykblog@gmail.com"
] | amntkykblog@gmail.com |
8ce7e115fc41ef8a70c24743cb6d49ce19361cf2 | 248ebd4dc0dbb675a898efac63d0c0a8ffa49ab4 | /Python/Basics/Python Exercises .Simple Calculations/USDtoBGN.py | 31b77e380f0df05d9d5631521988423cce46ff12 | [] | no_license | Karinacho/SoftUni | 002de33a947f95d2c06cf6e4b5a2d625ebe3199c | 4e0b72e4a59c7292a3bd44a36d80996e1207e054 | refs/heads/master | 2021-04-05T23:37:42.453007 | 2019-05-10T21:11:43 | 2019-05-10T21:11:43 | 125,027,319 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | USD = float(input())
# Name the magic conversion rate: fixed USD -> BGN (Bulgarian lev) rate.
USD_TO_BGN_RATE = 1.79549

currency = USD * USD_TO_BGN_RATE
# Print the converted amount rounded to two decimal places.
print(f"{currency:.2f} BGN")
| [
"kreativen_dom@abv.bg"
] | kreativen_dom@abv.bg |
2dd09cf0b1134b3972740048402bc6e9ee1c97be | 1ece1faa638f85c567fdb237c67340501f86f89e | /model/model_builder.py | 5bc0acb8d41370c2b1905ff26fb7f1070790eb67 | [] | no_license | seasa2016/transformer_random | 54223ee5b04a4563c7903d925436d843b8cf7f1c | e3e13c9a2ddc49558d8e991427a974848a850b9c | refs/heads/master | 2020-04-02T12:21:28.167673 | 2019-03-19T03:45:00 | 2019-03-19T03:45:00 | 154,429,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,030 | py | import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_,xavier_normal_
from .module.Embedding import Embedding
from .util.Logger import logger
from . import Constant
from . import transformer
def build_embedding(opt,word_dict,max_len,for_encoder=True,dtype='sum',tag=None):
    """Build an Embedding module for the encoder or decoder side.

    The embedding size comes from ``opt.src_word_vec_size`` for the encoder
    and ``opt.tar_word_vec_size`` for the decoder; the padding index is taken
    from the vocabulary's PAD token.
    """
    if(for_encoder):
        embedding_dim = opt.src_word_vec_size
    else:
        embedding_dim = opt.tar_word_vec_size
    #print(Constant.PAD_token)
    word_padding_idx = word_dict[Constant.PAD_token]
    num_word_embedding = len(word_dict)
    # num_word,max_len,emb_dim,feature_dim,dropout=0,dtype='sum'
    return Embedding(num_word= num_word_embedding,
                     max_len = max_len,
                     emb_dim = embedding_dim,
                     feature_dim = embedding_dim,
                     padding_idx = word_padding_idx,
                     dropout = opt.dropout,
                     dtype = dtype,tag=tag)
def build_encoder(opt,src_dict,tag_dict):
    """Build the transformer encoder (embedding + stacked layers).

    Maximum sequence length is hard-coded to 128.
    """
    max_len = 128
    src_embedding = build_embedding(opt,src_dict,max_len,tag=tag_dict)
    return transformer.Encoder( opt.enc_layer,opt.num_head,
                                opt.model_dim,opt.nin_dim_en,
                                opt.dropout,src_embedding)
def build_decoder(opt,tar_dict):
    """Build the transformer decoder (embedding + stacked layers).

    Maximum sequence length is hard-coded to 128, matching the encoder.
    """
    max_len = 128
    tar_embedding = build_embedding(opt,tar_dict,max_len,for_encoder=False,dtype=opt.decode_pos)
    return transformer.Decoder(
        opt.dec_layer,opt.num_head,
        opt.model_dim,opt.nin_dim_de,len(tar_dict),max_len,
        opt.self_attn_type,opt.dropout,tar_embedding
    )
def load_test_model(opt,model_path=None,mode=False):
    """Load a trained model checkpoint plus the subword vocabularies for
    inference; returns ``(model, opt)`` with the model in eval mode.

    When ``mode`` is False the model is rebuilt from the options stored in
    the checkpoint; otherwise the current ``opt`` is used.
    """
    if model_path is None:
        if(opt.test_from is None):
            raise ValueError('test_from shouble not be None')
        model_path = opt.test_from
    # NOTE(review): torch.load without map_location will fail on a CPU-only
    # host if the checkpoint was saved on GPU — confirm deployment target.
    checkpoint = torch.load(model_path)

    # Rebuild the source/target/tag vocabularies from the subword files;
    # line number doubles as the token id.
    data_new = dict()
    for t in ['source','target','tag']:
        data_new[t] = dict()
        with open('./{0}/subword.{1}'.format(opt.data,t)) as f_in:
            for i,word in enumerate(f_in):
                if(t=='source'):
                    # source tokens are stored wrapped in one extra character
                    # on each side — presumably quotes; verify file format.
                    data_new[t][word.strip()[1:-1]] = i
                else:
                    data_new[t][word.strip()+'_'] = i

    if(mode == False):
        model = build_base_model(checkpoint['opt'],opt, data_new, torch.cuda.is_available(),checkpoint)
    else:
        #build_model_pre(opt,opt,data_ori,data_new,True,checkpoint=checkpoint)
        model = build_base_model(opt,opt,data_new,True,checkpoint=checkpoint)

    model.load_state_dict(checkpoint['model'])
    model.eval()
    return model, opt
def build_base_model(model_opt,opt,data_token,gpu,checkpoint=None,dtype=None):
    """Build the Transformer (encoder + decoder), report parameter counts,
    then either restore weights from ``checkpoint`` or initialise them.

    Args:
        model_opt: options describing the architecture to build.
        opt: current run options (kept for interface parity).
        data_token: dict of vocabularies with keys 'source', 'target' and
            optionally 'tag'.
        gpu: move the model to CUDA when True.
        checkpoint: optional dict holding a 'model' state_dict to restore.
        dtype: unused; kept for backward compatibility.

    Returns:
        The constructed (and possibly restored) Transformer on its device.
    """
    if('tag' in data_token):
        encoder = build_encoder(model_opt,data_token['source'],len(data_token['tag']))
    else:
        encoder = build_encoder(model_opt,data_token['source'],None)
    logger.info("finish build encoder")
    decoder = build_decoder(model_opt,data_token['target'])
    logger.info("finish build decoder")

    device = torch.device("cuda" if gpu else "cpu")
    model = transformer.Transformer(encoder,decoder)
    #print(model)

    # Count parameters overall and per sub-module for the size report.
    n_params = sum(p.nelement() for p in model.parameters())
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        # BUG FIX: the original condition `'decoder' or 'generator' in name`
        # is always truthy (non-empty string), so every non-encoder parameter
        # was counted as decoder regardless of its name.
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print("the size will be {0} {1} {2}".format(n_params,enc,dec))

    if(checkpoint is not None):
        logger.info('loading model weight from checkpoint')
        model.load_state_dict(checkpoint['model'])
    else:
        # Fresh initialisation: optional uniform init, then optional
        # Xavier-normal init for matrices (dim > 1).
        if(model_opt.param_init != 0.0):
            for p in model.parameters():
                if(p.requires_grad):
                    p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if(model_opt.param_init_glorot):
            for p in model.parameters():
                if(p.requires_grad):
                    if p.dim() > 1:
                        xavier_normal_(p)

    model.to(device)
    logger.info('the model is now in the {0} mode'.format(device))
    return model
def change(model_opt,opt,model,data_new):
    """Swap in a freshly built decoder for the new target vocabulary, copy the
    decoder-related hyperparameters from ``opt`` into ``model_opt``, freeze
    the encoder embedding, and re-initialise trainable parameters.
    """
    model.decoder = build_decoder(opt,data_new['target'])
    #update the parameter
    model_opt.tar_word_vec_size = opt.tar_word_vec_size
    model_opt.dropout = opt.dropout
    model_opt.dec_layer = opt.dec_layer
    model_opt.num_head = opt.num_head
    model_opt.model_dim = opt.model_dim
    model_opt.nin_dim_de = opt.nin_dim_de
    model_opt.self_attn_type = opt.self_attn_type
    # NOTE(review): dropout is assigned twice (also a few lines above).
    model_opt.dropout = opt.dropout
    #lock the grad for the encoder
    # NOTE(review): if word_emb is an nn.Module (not a Parameter/Tensor),
    # setting .requires_grad on it does not freeze its parameters — confirm.
    model.encoder.embedding.word_emb.requires_grad = False
    if model_opt.param_init != 0.0:
        for p in model.parameters():
            if(p.requires_grad):
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
    for p in model.parameters():
        if(p.requires_grad):
            if(p.dim()>1):
                xavier_normal_(p)
    if(opt.replace):
        #one for the pretrain model and the other for the new model
        # NOTE(review): model_opt.model_dim was already overwritten with
        # opt.model_dim above, so this Linear is always square — confirm
        # the intended pretrain dim is still available when replace is used.
        logger.info("with mid layer {0} {1}".format(model_opt.model_dim,opt.model_dim))
        model.mid = nn.Linear(model_opt.model_dim,opt.model_dim)
    return model
def build_model_pre(model_opt,opt,data_ori,data_new,gpu,checkpoint=None):
    """Build a model from a pretrained checkpoint (original vocabularies),
    then adapt it to the new target vocabulary via ``change``.

    Raises ValueError when no checkpoint is supplied — this path requires a
    pretrained model.
    """
    #in our work,we only use text
    #build encoder
    encoder = build_encoder(model_opt,data_ori['source'],len(data_ori['tag']))
    logger.info("build the origin encoder")
    decoder = build_decoder(model_opt,data_ori['target'])
    logger.info("build the origin decoder")

    device = torch.device("cuda" if gpu else "cpu")
    model = transformer.Transformer(encoder,decoder)
    print(model)

    if(checkpoint):
        logger.info('loading model weight from checkpoint')
        model.load_state_dict(checkpoint['model'])
    else:
        raise ValueError('cant access this mode without using pretrain model')

    # Replace the decoder and freeze the encoder embedding for fine-tuning.
    model = change(model_opt,opt,model,data_new)
    #print(model)

    # Count parameters overall and per sub-module for the size report.
    n_params = sum(p.nelement() for p in model.parameters())
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        # BUG FIX: the original condition `'decoder' or 'generator' in name`
        # is always truthy, so every non-encoder parameter was counted as
        # decoder regardless of its name.
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print("the size will be {0} {1} {2}".format(n_params,enc,dec))

    model.to(device)
    logger.info('the model is now in the {0} mode'.format(device))
    return model
def build_model(model_opt,opt,data_token,checkpoint):
    """Convenience wrapper: build the base model, using CUDA when available."""
    logger.info('Building model...')
    model = build_base_model(model_opt,opt,data_token,torch.cuda.is_available(),checkpoint)
    return model
| [
"ericet1234@gmail.com"
] | ericet1234@gmail.com |
be1ae9233870e4cb74d127c269ecafd6a6201e85 | 46b002b8af55c62689e10e0758ec8e8005893252 | /RESTful/drones/v2/urls.py | fe2db487a5dbd051ca6d3b85db3ef4960aba1ae8 | [] | no_license | DictumAcFactum/books_with_code | e2ff5f500a1b3c7298bcf64c4f26c10284ed8f08 | 327bdf2fdd1c483dad0a841fdd7b9d364a7957fc | refs/heads/master | 2023-04-05T15:25:57.974362 | 2021-04-11T08:57:14 | 2021-04-11T08:57:14 | 277,194,212 | 0 | 0 | null | 2021-04-11T08:57:47 | 2020-07-04T22:28:02 | Python | UTF-8 | Python | false | false | 1,013 | py | from django.conf.urls import url
from .. import views
from ..v2 import views as views_v2
# URL namespace for version 2 of the drones API.
app_name = 'drones_v2'

# Route table: the endpoints reuse the shared class-based views; only the
# API root view is version-specific (ApiRootVersion2).
urlpatterns = [
    url(r'^vehicle-categories/$', views.DroneCategoryList.as_view(), name=views.DroneCategoryList.name),
    url(r'^vehicle-categories/(?P<pk>[0-9]+)$', views.DroneCategoryDetail.as_view(),
        name=views.DroneCategoryDetail.name),
    url(r'^vehicles/$', views.DroneList.as_view(), name=views.DroneList.name),
    url(r'^vehicles/(?P<pk>[0-9]+)$', views.DroneDetail.as_view(), name=views.DroneDetail.name),
    url(r'^pilots/$', views.PilotList.as_view(), name=views.PilotList.name),
    url(r'^pilots/(?P<pk>[0-9]+)$', views.PilotDetail.as_view(), name=views.PilotDetail.name),
    url(r'^competitions/$', views.CompetitionList.as_view(), name=views.CompetitionList.name),
    url(r'^competitions/(?P<pk>[0-9]+)$', views.CompetitionDetail.as_view(), name=views.CompetitionDetail.name),
    url(r'^$', views_v2.ApiRootVersion2.as_view(), name=views_v2.ApiRootVersion2.name),
]
| [
"pinkfloydx20@gmail.com"
] | pinkfloydx20@gmail.com |
51a378f46c55b18901d1b9fae4907840619b1b56 | 752881a4f3ae95760e7645a9c610b0be3d57a188 | /ethnode/toolkit/mockproc.py | 8c023d5e2bc523a6f464deb07cb592f77bbd8ae5 | [] | no_license | AndriyKhrobak/Ethearnal | 2b57f5d8ef69348b4870c9d28671857bb7c69a35 | 29ff78b12085796bc25deb2e92abc9caacbee5f7 | refs/heads/master | 2021-05-08T23:57:03.435539 | 2018-01-22T03:35:33 | 2018-01-22T03:35:33 | 119,725,779 | 0 | 1 | null | 2018-01-31T18:23:49 | 2018-01-31T18:23:49 | null | UTF-8 | Python | false | false | 578 | py | #!/usr/bin/env python
import sys
from time import sleep
from datetime import datetime
def main(name: str, time_interval: float):
    """Repeatedly print the current time on a single line, refreshing in place.

    Writes an ISO timestamp, sleeps ``time_interval`` seconds, then emits a
    carriage return so the next timestamp overwrites the previous one.
    Runs until interrupted.

    ``name`` is accepted for interface compatibility but is currently unused.

    Fix: removed the dead ``cnt`` accumulator that was incremented but never
    read.
    """
    while True:
        sys.stdout.write(datetime.now().isoformat())
        sys.stdout.flush()
        sleep(time_interval)
        # rewind to the start of the line so the display updates in place
        sys.stdout.write('\r')
        sys.stdout.flush()
if __name__ == '__main__':
    # CLI entry point: mockproc <string> <sleep interval>
    if len(sys.argv) != 3:
        print('usage: mockproc <string> <sleep interval>')
        sys.exit(-1)
    name = sys.argv[1]
    interval = float(sys.argv[2])
    main(name, interval)
    sys.exit(0)
| [
"dobri.stoilov@gmail.com"
] | dobri.stoilov@gmail.com |
7b15a582c9c0112ff2f9ca9b1f7fc316751f89ce | 0cd12af8acd9233d76ca6c228d80768e7c4dc041 | /.c9/metadata/environment/~/.c9/python3/lib/python3.6/site-packages/django/conf/urls/__init__.py | 85bf3b7bc11272915701a44ff55b5f3cf8d4174d | [] | no_license | sheldon18/todo | eb700aac7be54145413901257bba6b44f7e9c21f | 1c141a2a1260dbfd9e0b6962fbe0b81e0acde005 | refs/heads/master | 2022-12-14T13:20:31.348810 | 2020-03-01T03:15:21 | 2020-03-01T03:15:21 | 244,056,042 | 0 | 0 | null | 2022-12-08T03:42:47 | 2020-02-29T23:19:36 | Python | UTF-8 | Python | false | false | 479 | py | {"filter":false,"title":"__init__.py","tooltip":"~/.c9/python3/lib/python3.6/site-packages/django/conf/urls/__init__.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":12,"column":45},"end":{"row":12,"column":45},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1582427020102,"hash":"828d854749c372a42ae9b5e1699e615bbb5e94c9"} | [
"ubuntu@ip-172-31-81-27.ec2.internal"
] | ubuntu@ip-172-31-81-27.ec2.internal |
fa4239e0b5480d4e87dbed980b75e0c6b0c42548 | 07d29263b1b9bdc9f8bc72c3f3e762eeae57dd35 | /zuowenSTN/regression_code/eval.py | 2b242101c9142798bbc8b465e9cfba404f7c270c | [] | no_license | ZuowenWang0000/BachelorThesis | f5c4a24ef01b9420b0bbf2a20611b7388e068ca8 | a50030e0c94eba3972f789b29fee473da20fed5a | refs/heads/master | 2022-02-21T23:12:23.932460 | 2019-05-10T13:23:26 | 2019-05-10T13:23:26 | 169,987,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,306 | py | """
Evaluation of a given checkpoint in the standard and adversarial sense. Can be
called as an infinite loop going through the checkpoints in the model directory
as they appear and evaluating them. Accuracy and average loss are printed and
added as tensorboard summaries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import json
import math
import os
import sys
import time
import copy
import numpy as np
import tensorflow as tf
from tqdm import trange
import cifar10_input
import cifar100_input
import svhn_input
import resnet_reg
import vgg
from spatial_attack import SpatialAttack
import utilities
# A function for evaluating a single checkpoint
def evaluate(model, attack, sess, config, attack_type, data_path,
             summary_writer=None, eval_on_train=False):
    """Evaluate *model* on natural and (optionally) adversarial inputs.

    Iterates over the evaluation (or training) set batch-by-batch, runs the
    model on the clean batch and on the attacked batch, accumulates accuracy
    and cross-entropy, optionally writes TensorBoard summaries, prints the
    results, and returns
    ``[nat_acc_%, adv_acc_%, avg_nat_xent, avg_adv_xent]``.
    """
    num_eval_examples = config.eval.num_eval_examples
    eval_batch_size = config.eval.batch_size
    # Select the dataset wrapper from the config.
    if config.data.dataset_name == "cifar-10":
        data_iterator = cifar10_input.CIFAR10Data(data_path)
    elif config.data.dataset_name == "cifar-100":
        data_iterator = cifar100_input.CIFAR100Data(data_path)
    elif config.data.dataset_name == "svhn":
        data_iterator = svhn_input.SVHNData(data_path)
    else:
        raise ValueError("Unknown dataset name.")

    global_step = tf.train.get_or_create_global_step()
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    total_xent_nat = 0.
    total_xent_adv = 0.
    total_corr_nat = 0
    total_corr_adv = 0

    for ibatch in trange(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)
        if eval_on_train:
            x_batch = data_iterator.train_data.xs[bstart:bend, :]
            y_batch = data_iterator.train_data.ys[bstart:bend]
        else:
            x_batch = data_iterator.eval_data.xs[bstart:bend, :]
            y_batch = data_iterator.eval_data.ys[bstart:bend]

        # Identity spatial transform (3 params per example) for the natural pass.
        noop_trans = np.zeros([len(x_batch), 3])
        if config.eval.adversarial_eval:
            x_batch_adv, adv_trans = attack.perturb(x_batch, y_batch, sess)
        else:
            # No attack requested: "adversarial" pass is just the clean batch.
            x_batch_adv, adv_trans = x_batch, noop_trans

        dict_nat = {model.x_input: x_batch,
                    model.y_input: y_batch,
                    model.transform: noop_trans,
                    model.is_training: False}
        dict_adv = {model.x_input: x_batch_adv,
                    model.y_input: y_batch,
                    model.transform: adv_trans,
                    model.is_training: False}

        cur_corr_nat, cur_xent_nat = sess.run([model.num_correct, model.xent],
                                              feed_dict = dict_nat)
        cur_corr_adv, cur_xent_adv = sess.run([model.num_correct, model.xent],
                                              feed_dict = dict_adv)
        total_xent_nat += cur_xent_nat
        total_xent_adv += cur_xent_adv
        total_corr_nat += cur_corr_nat
        total_corr_adv += cur_corr_adv

    # Averages over the whole evaluation set.
    avg_xent_nat = total_xent_nat / num_eval_examples
    avg_xent_adv = total_xent_adv / num_eval_examples
    acc_nat = total_corr_nat / num_eval_examples
    acc_adv = total_corr_adv / num_eval_examples

    if summary_writer:
        # Duplicate tags with/without the _eval suffix so both dashboards
        # pick the values up.
        summary = tf.Summary(value=[
            tf.Summary.Value(tag='xent_adv_eval', simple_value= avg_xent_adv),
            tf.Summary.Value(tag='xent_nat_eval', simple_value= avg_xent_nat),
            tf.Summary.Value(tag='xent_adv', simple_value= avg_xent_adv),
            tf.Summary.Value(tag='xent_nat', simple_value= avg_xent_nat),
            tf.Summary.Value(tag='accuracy_adv_eval', simple_value= acc_adv),
            tf.Summary.Value(tag='accuracy_nat_eval', simple_value= acc_nat),
            tf.Summary.Value(tag='accuracy_adv', simple_value= acc_adv),
            tf.Summary.Value(tag='accuracy_nat', simple_value= acc_nat)])
        summary_writer.add_summary(summary, global_step.eval(sess))

    step = global_step.eval(sess)
    print('Eval at step: {}'.format(step))
    print('  Adversary: ', attack_type)
    print('  natural: {:.2f}%'.format(100 * acc_nat))
    print('  adversarial: {:.2f}%'.format(100 * acc_adv))
    print('  avg nat xent: {:.4f}'.format(avg_xent_nat))
    print('  avg adv xent: {:.4f}'.format(avg_xent_adv))

    return [100 * acc_nat, 100 * acc_adv, avg_xent_nat, avg_xent_adv]
if __name__ == "__main__":
    # ---- CLI arguments -------------------------------------------------
    parser = argparse.ArgumentParser(
        description='Eval script options',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', type=str,
                        help='path to config file',
                        default="configs/christinaconfig_cifar10_spatial_eval.json", required=False)
    parser.add_argument('--save_root_path', type=str,
                        help='path to repo dir',
                        default='/Users/heinzec/projects/core-da/repo_dir_7jan', required=False)
    parser.add_argument('--exp_id_list', type=str, nargs='+',
                        default=['3e3p7xPG98_1058376','4hbWroJkyE_1058258'])
    parser.add_argument('--eval_on_train', type=int,
                        help='flag whether to use training or test images',
                        default=0, required=False)
    parser.add_argument('-s', '--save_filename', type=str,
                        help='path to plots folder',
                        default='test.json', required=False)
    parser.add_argument('--linf_attack', type=int,
                        help='path to plots folder',
                        default=0, required=False)
    args = parser.parse_args()

    config_dict = utilities.get_config(args.config)
    dataset = config_dict['data']['dataset_name']

    # setting up save folders
    split = 'train' if args.eval_on_train else 'test'
    print(args.exp_id_list)
    save_folder = os.path.join(args.save_root_path,
                               'additional_evals_{}'.format(dataset))
    os.makedirs(save_folder, exist_ok=True)
    save_filename = os.path.join(save_folder,
                                 '{}_{}_{}'.format(dataset, split, args.save_filename))

    # When evaluating on the training split, widen num_eval_examples to the
    # full training-set size of the chosen dataset.
    if args.eval_on_train:
        if dataset == 'cifar-10' or dataset == 'cifar-100':
            config_dict['eval']['num_eval_examples'] = 50000
        elif dataset == 'svhn':
            config_dict['eval']['num_eval_examples'] = 73257
        else:
            raise NotImplementedError

    # Keep a copy of the hyperparameters in the results file.
    config_dict_copy = copy.deepcopy(config_dict)
    out_dict = {}
    out_dict['hyperparameters'] = config_dict_copy
    config = utilities.config_to_namedtuple(config_dict)

    # num_ids in model does not matter for eval
    num_ids = 64
    model_family = config.model.model_family
    # Build the network; 'fo' (first-order) spatial attacks need a
    # differentiable transform inside the model.
    if model_family == "resnet":
        if config.attack.use_spatial and config.attack.spatial_method == 'fo':
            diffable = True
        else:
            diffable = False
        model = resnet_reg.Model(config.model, num_ids, diffable)
    elif model_family == "vgg":
        if config.attack.use_spatial and config.attack.spatial_method == 'fo':
            diffable = True
        else:
            diffable = False
        model = vgg.Model(config.model, num_ids, diffable)

    global_step = tf.train.get_or_create_global_step()
    # Choose the attack used for the adversarial pass.
    if args.linf_attack:
        attack_eval = SpatialAttack(model, config.attack, 'fo', 1,
                                    config.attack.spatial_limits,
                                    config.attack.epsilon,
                                    config.attack.step_size,
                                    config.attack.num_steps)
    else:
        attack_eval = SpatialAttack(model, config.attack, 'grid')
    saver = tf.train.Saver()

    # Evaluate every requested experiment id.
    # NOTE: the loop variable `id` shadows the builtin of the same name.
    for id in args.exp_id_list:
        out_dict[id] = {}
        model_dir = '%s/logdir/%s' % (args.save_root_path, id)
        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt is None:
            print('No checkpoint found.')
        else:
            with tf.Session() as sess:
                # Restore the checkpoint
                saver.restore(sess,
                              os.path.join(model_dir,
                                           ckpt.model_checkpoint_path.split("/")[-1]))
                [acc_nat, acc_grid, _, _] = evaluate(
                    model, attack_eval, sess, config, 'grid',
                    config.data.data_path, eval_on_train=args.eval_on_train)
                out_dict[id]['{}_grid_accuracy'.format(split)] = acc_grid
                out_dict[id]['{}_nat_accuracy'.format(split)] = acc_nat
                # save results (incrementally, after each experiment)
                with open(save_filename, 'w') as result_file:
                    json.dump(out_dict, result_file, sort_keys=True, indent=4)

    # Summarise mean and standard deviation across all experiment ids.
    grid_accuracy = []
    nat_accuracy = []
    for key in out_dict:
        if key != 'hyperparameters':
            grid_accuracy.append(out_dict[key]['{}_grid_accuracy'.format(split)])
            nat_accuracy.append(out_dict[key]['{}_nat_accuracy'.format(split)])
    out_dict['{}_grid_accuracy_summary'.format(split)] = (np.mean(grid_accuracy),
                                                          np.std(grid_accuracy))
    out_dict['{}_nat_accuracy_summary'.format(split)] = (np.mean(nat_accuracy),
                                                         np.std(nat_accuracy))
    with open(save_filename, 'w') as result_file:
        json.dump(out_dict, result_file, sort_keys=True, indent=4)
| [
"noreply@github.com"
] | noreply@github.com |
8232e6ec961ef965559c59a8301dda4f3979961a | ec4d4509d9ec9885f7f1461efde321f38c1df62c | /test.py | 9edc383e0f2763059d4956baff6ee529cf00868b | [] | no_license | skollhati/CLACK | d60071479c38feab871f1216affc22a84f23655f | 34b451d389785d7c38ef4a54c4e01f148f224844 | refs/heads/master | 2020-07-28T02:46:13.592736 | 2019-10-25T03:54:30 | 2019-10-25T03:54:30 | 209,284,253 | 0 | 0 | null | 2020-07-18T14:54:41 | 2019-09-18T10:45:43 | CSS | UTF-8 | Python | false | false | 1,914 | py | s1 = [4,99,2,6,7,13,88,76]
# Scratch/debug values; not used by solution() below.
s2 = [6,88,13,4,99,2,7]
from collections import Counter
import re
s3 = 'abcdea'
s4 = 'cookoie'
s1_char_count = set(sorted(Counter(list(s3)).items()))
s2_char_count = set(sorted(Counter(list(s4)).items()))
t = [1,2]
from collections import Counter
import re
def char_count_check(s1, s2):
    """Compare the Counter items of *s1* and *s2* positionally.

    For each position in s1's (first-occurrence ordered) counts, the
    character at the same position in s2's counts must match and s2 must
    have at least as many occurrences; s2 running out of distinct
    characters also fails.
    """
    counts1 = list(Counter(s1).items())
    counts2 = list(Counter(s2).items())
    for pos, (ch, needed) in enumerate(counts1):
        if pos >= len(counts2):
            return False
        other_ch, available = counts2[pos]
        if other_ch != ch or available < needed:
            return False
    return True
def find_match(s2):
    """Scan each position; fail if a repeat of the current character is seen
    that is not directly adjacent to the previous occurrence in this scan.

    Note: the inner scan stops at the first differing character, so any
    match it observes is adjacent — in practice the check never fails.
    """
    for start, ch in enumerate(s2):
        last_seen = start
        pos = start + 1
        while pos < len(s2) and s2[pos] == ch:
            if pos - last_seen != 1:
                return False
            last_seen = pos
            pos += 1
    return True
def find_match2(s2):
    """Return False if any character occurs more than twice in *s2*."""
    for start, ch in enumerate(s2):
        seen_duplicate = False
        for later in s2[start + 1:]:
            if later == ch:
                if seen_duplicate:
                    return False
                seen_duplicate = True
    return True
def find_match3(s1, s2):
    """Walk *s2* roughly in step with *s1*, rejecting *s2* when a character
    already finished in the scan reappears later in *s2*.

    ``offset`` tracks how far ahead in *s2* the scan has advanced relative
    to the current index in *s1*.
    """
    finished = []
    offset = 0
    for i, ch in enumerate(s1):
        if ch in finished:
            continue
        j = i + offset
        while j < len(s2):
            cur = s2[j]
            if cur in finished:
                return False
            if cur != ch:
                offset = j - i - 1
                break
            j += 1
        finished.append(ch)
    return True
def solution(s1, s2):
    """True when *s2* passes both the character-count and ordering checks."""
    return char_count_check(s1, s2) and find_match3(s1, s2)
# Smoke test of solution() on a sample pair.
print(solution('cookie','coookieeo'))
"df201901@gmail.com"
] | df201901@gmail.com |
54c39f42bb7749d582e0b83874b3e1f806019822 | 8704f40748ab2e247e16777ab0cfeaab6bacd201 | /anaconda_code/Assignment/linearAdvect.py | 6985c72a98d58f4a2367244c62e8da7898f0c033 | [] | no_license | ojp1995/Numerics_1 | e3ac8a345ebe514aefef92b5531139e613731b27 | 63eda51570f0de2bcff86ddeb8b894390e4ddb74 | refs/heads/master | 2020-03-31T13:27:55.894134 | 2018-11-21T13:54:12 | 2018-11-21T13:54:12 | 152,256,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,304 | py | #!/usr/bin/python3
# Outer code for setting up the linear advection problem on a uniform
# grid and calling the function to perform the linear advection and plot.
### The matplotlib package contains plotting functions ###
import matplotlib.pyplot as plt
import numpy as np
# read in all the linear advection schemes, initial conditions and other
# code associated with this application
from OJPinitialConditions import *
from OJPadvectionSchemes import *
from OJPdiagnostics import *
def convergence_exp():
    """Experiment to test the convergence of the advection schemes as the
    spatial/temporal resolution is increased (loglog l2-error vs dx plot,
    with dx and dx^2 control lines)."""
    n_exp_size = 30 ##number of times we are increasing the spacial and time resolution
    # NOTE(review): u is defined but c below is hard-coded as 0.4*(nx/nt).
    u = 0.2 #constant wind speed
    xmin = 0
    xmax = 1
    ##initialise error and l2error vectors for each of the methods
    l2FTBS_dx_err = np.zeros(n_exp_size)
    l2CTCS_dx_err = np.zeros(n_exp_size)
    l2LW_dx_err = np.zeros(n_exp_size)
    errorFTBS = np.zeros(n_exp_size)
    errorCTCS = np.zeros(n_exp_size)
    errorLW = np.zeros(n_exp_size)
    dx_it = np.zeros(n_exp_size)
    ##initialise control lines for graph
    delta_x = np.zeros(n_exp_size)
    delta_x2 = np.zeros(n_exp_size)
    ##loop for increasing resolution
    for i in range(0,n_exp_size):
        nx = i*10 + 10 ##increasing spacial step
        dx = (xmax - xmin)/nx
        nt = nx ##keeping overall time constant
        dx_it[i] = dx
        # Courant number held fixed since nt == nx.
        c = 0.4*(nx/nt)
        # spatial points for plotting and for defining initial conditions
        x = np.arange(xmin, xmax, dx)
        # Initial conditions
        phiOld = cosBell(x, 0.25, 0.75)
        # Exact solution is the initial condition shifted around the domain
        phiAnalytic = cosBell((x - c*nt*dx)%(xmax - xmin), 0.25, 0.75)
        # Advect the profile using finite difference for all the time steps
        phiFTBS = FTBS(phiOld, c, nt)
        phiCTCS = CTCS(phiOld, c, nt)
        phiLW = LW(phiOld, c, nt)
        ##computing points for control lines
        delta_x[i] = dx
        delta_x2[i] = dx**2
        ##calculating the l2error for each method
        l2FTBS_dx_err[i], errorFTBS = l2ErrorNorm(phiFTBS, phiAnalytic)
        l2CTCS_dx_err[i], errorCTCS = l2ErrorNorm(phiCTCS[nt-1,:], phiAnalytic)
        l2LW_dx_err[i], errorLW = l2ErrorNorm(phiLW, phiAnalytic)
    ##plotting l2 error against increase in dx on a loglog graph
    plt.figure(1, figsize=(10,7))
    plt.clf()
    plt.loglog(dx_it, l2FTBS_dx_err, label='FTBS', color = 'red')
    plt.loglog(dx_it, l2CTCS_dx_err, label='CTCS', color = 'green')
    plt.loglog(dx_it, l2LW_dx_err, label = 'LW', color = 'orange')
    plt.loglog(dx_it, delta_x, label='$\Delta x$', linestyle=':', color = 'black')
    plt.loglog(dx_it, delta_x2, label = '$\Delta x^{2}$', linestyle = ':', color = 'blue')
    plt.ylabel('$l_{2}$ error norm')
    plt.xlabel('Number of spacial steps')
    plt.legend()
    plt.title('Loglog plot of error norms as of spacial points increase')
# Run the convergence experiment when this module is executed.
convergence_exp()

# Spatial resolutions used by c_exp() below to vary the Courant number.
nx_list = (40, 160, 200, 240) ##list of values to vary spacial step to vary c
def c_exp():
"Experiment to test the Von_Neumann stability analysis by varying the "
"spacial steps to vary the courant number"
# Parameters
xmin = 0
xmax = 1
##loop over the differnet sizes of spacial steps
for i in range(len(nx_list)):
nx = nx_list[i] ##varying number of spacial steps to vary c
nt = 40 ##keeping spacial steps constant so c varys
u=0.2 ##wind speed constant
dx = (xmax - xmin)/nx
c = u*(nx/nt)
print(c) ##printing the courant number each time to double check how it changes
x = np.arange(xmin, xmax, dx)
# Initial conditions
phiOld = cosBell(x, 0.25, 0.75)
# Exact solution is the initial condition shifted around the domain
phiAnalytic = cosBell((x - c*nt*dx)%(xmax - xmin), 0.25, 0.75)
# Advect the profile using finite difference for all the time steps
phiFTCS = FTCS(phiOld, c, nt)
phiFTBS = FTBS(phiOld, c, nt)
phiCTCS = CTCS(phiOld, c, nt)
phiLW = LW(phiOld, c, nt)
##calculating the l2error norms
l2FTCS, errorFTCS = l2ErrorNorm(phiFTCS, phiAnalytic)
l2FTBS, errorFTBS = l2ErrorNorm(phiFTBS, phiAnalytic)
l2CTCS, errorCTCS = l2ErrorNorm(phiCTCS[nt-1,:], phiAnalytic)
l2LW, errorLW = l2ErrorNorm(phiLW, phiAnalytic)
font = {'size' : 20}
plt.rc('font', **font)
plt.figure(i+2, figsize=(10,7))
plt.clf()
plt.ion()
plt.plot(x, phiOld, label='Initial', color='black')
plt.plot(x, phiAnalytic, label='Analytic', color='black',
linestyle='--', linewidth=2)
plt.plot(x, phiFTBS, label='FTBS', color='red')
plt.plot(x, phiCTCS[nt-1,:], label='CTCS', color='green') #using second to last time step of t to plot
plt.plot(x, phiLW, label='Lax-Wendroff', color="orange") #using second to last time step to plot
plt.axhline(0, linestyle=':', color='black')
plt.ylim([-0.2,1.4]) #increased y limiy to show where LW seems to be going wrong
plt.legend()
plt.xlabel('$x$')
plt.ylabel('$\phi$')
plt.title('Linear Advection where c=%f'%c)
##printing l2 and linf norm so we can see how error changes as we
##increase resolution
print("FTBS l2 error norm = ", l2FTBS)
print("FTBS linf error norm = ", lInfErrorNorm(phiFTBS, phiAnalytic))
print("CTCS l2 error norm = ", l2CTCS)
print("CSCS linf error norm = ", lInfErrorNorm(phiCTCS, phiAnalytic))
print("LW l2 error norm = ", l2LW)
print("LW linf error norm = ", lInfErrorNorm(phiLW, phiAnalytic))
c_exp()
def TV():
    """Experiment to test the total variation (TV) of each advection scheme
    by computing the variation of the solution at every time step.

    The per-step total variation of FTBS, CTCS and Lax-Wendroff is collected
    and plotted against the time step.
    """
    # Parameters
    xmin = 0
    xmax = 1
    nx = 100
    nt = 100
    u = 0.2            # wind speed, kept constant
    c = u*(nx/nt)      # Courant number
    # Derived parameters
    dx = (xmax - xmin)/nx
    # Spatial points for defining the initial conditions
    x = np.arange(xmin, xmax, dx)
    # Initial conditions
    phiOld = cosBell(x, 0.25, 0.75)
    # Total variation recorded for each time step.  k runs over 2..nt-1, so
    # there are nt-2 entries (previously sized nx-2, which only worked
    # because nx == nt here).
    TV_FTBS = np.zeros(nt-2)
    TV_CTCS = np.zeros(nt-2)
    TV_LW = np.zeros(nt-2)
    for k in range(2, nt):  # loop over each time step
        # Per-grid-point variation for this time step: one entry per spatial
        # point (previously sized nt, which only worked because nx == nt).
        TVinter_FTBS = np.zeros(nx)
        TVinter_CTCS = np.zeros(nx)
        TVinter_LW = np.zeros(nx)
        # Advect the initial profile for k time steps with each scheme
        phi_FTBS = FTBS(phiOld, c, k)
        phi_CTCS = CTCS(phiOld, c, k)
        phi_LW = LW(phiOld, c, k)
        phiCTCS = phi_CTCS[k-2,:]
        for i in range(nx):  # loop over each spatial difference
            # Absolute difference between neighbouring points (periodic wrap)
            TVinter_FTBS[i] = abs(phi_FTBS[(i+1) % nx] - phi_FTBS[i])
            TVinter_CTCS[i] = abs(phiCTCS[(i+1) % nx] - phiCTCS[i])
            TVinter_LW[i] = abs(phi_LW[(i+1) % nx] - phi_LW[i])
        # Sum to find the total variation for this time step
        TV_FTBS[k-2] = sum(TVinter_FTBS)
        TV_CTCS[k-2] = sum(TVinter_CTCS)
        TV_LW[k-2] = sum(TVinter_LW)
    # Plot total variation against time
    plt.figure(6, figsize=(10, 7))
    plt.clf()
    plt.plot(TV_FTBS, label='FTBS', color='blue')
    plt.plot(TV_CTCS, label='CTCS', color='green')
    plt.plot(TV_LW, label='LW', color='orange')
    plt.legend()
    plt.xlabel('time step')
    plt.ylabel('Total Variation')
    plt.title('Total Variation of Advection methods')


TV()
| [
"contact.o.j.phillips@gmail.com"
] | contact.o.j.phillips@gmail.com |
dcae116535fa0ee935040906690848d5008cbce8 | b9502638bb6176120107a600aaefcffc9d43ac9f | /test_pytest/test_yml.py | 211e36a9c88ceb3c5530408f35084b0cfcbaa42f | [] | no_license | zhanglei12306/Python_Practice | ccd174c620100160439d747d3c79882104564f85 | 1e3770db673555b3e98e55fdc3a690af2d80630e | refs/heads/master | 2023-03-31T11:14:07.846445 | 2021-04-05T06:52:36 | 2021-04-05T06:52:36 | 321,687,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | # @pytest.mark.parametrize("参数名",列表数据)
# 参数名:作为测试用例的参数. 字符串格式,多个参数中间用逗号隔开。
# 列表数据:一组测试数据。list格式,多组数据用元组类型,
# list的每个元素都是一个元组,元组里的每个元素和按参数顺序一一对应。
# 可以添加ids参数指定用例说明(别名)
import pytest
import yaml
# def add_fun(a, b):
# return a + b + 10
# @pytest.mark.parametrize("a, b, expected", yaml.safe_load(open("data.yml"))["datas"],
# ids=yaml.safe_load(open("data.yml"))["myids"])
# def test_add(a, b, expected):
# assert add_fun(a, b) == expected
# def get_datas():
# with open("data.yml") as f:
# datas = yaml.safe_load(f)
# print(datas) # {'datas': [[1, 1, 12], [-1, -2, 7], [1000, 1000, 2010]], 'myids': ['one', 'two', 'three']}
# print(50 * '*')
# # 获取文件中key为datas的数据
# add_datas = datas["datas"]
# print(50 * '-')
# print(add_datas) #[[1, 1, 12], [-1, -2, 7], [1000, 1000, 2010]]
# # 获取文件中key为myids的数据
# add_ids = datas["myids"]
# print(50 * '+')
# print(add_ids) #['one', 'two', 'three']
# return [add_datas, add_ids]
#
# def add_fun(a, b):
# return a + b + 10
# @pytest.mark.parametrize("a, b, expected", get_datas()[0], ids=get_datas()[1])
# def test_add(a, b, expected):
# assert add_fun(a, b) == expected
| [
"1044570046@qq.com"
] | 1044570046@qq.com |
0703769d9b487fad66ce7c6dc02f421d17d9732c | 6f636814e48124376fb1a649ac6758f09ecaf05b | /Final_Project/catkin_ws/devel/lib/python2.7/dist-packages/turtlebot3_msgs/srv/_SetFollowState.py | 9c1ca61ffba4d64903898e570169e1ea7b2aeff6 | [] | no_license | JiaqiaoZhang/aa274 | fbb202210f7879b917ee63e7fa15a80ecae1323c | 693e1a27cbaa8eca62a05b53cf2a487f85b6610a | refs/heads/master | 2020-08-23T18:37:17.050587 | 2020-01-29T07:28:07 | 2020-01-29T07:28:07 | 216,683,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,482 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from turtlebot3_msgs/SetFollowStateRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetFollowStateRequest(genpy.Message):
  """Request message for the turtlebot3_msgs/SetFollowState service.

  Carries a single uint8 ``state`` field; STOPPED and FOLLOW are the
  message's named constants for that field.
  """
  _md5sum = "92b912c48c68248015bb32deb0bf7713"
  _type = "turtlebot3_msgs/SetFollowStateRequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """uint8 STOPPED = 0
uint8 FOLLOW = 1
uint8 state
"""
  # Pseudo-constants
  STOPPED = 0
  FOLLOW = 1

  __slots__ = ['state']
  _slot_types = ['uint8']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       state

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
    """
    if args or kwds:
      super(SetFollowStateRequest, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.state is None:
        self.state = 0
    else:
      self.state = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # The message body is a single packed uint8.
      buff.write(_get_struct_B().pack(self.state))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 1
      (self.state,) = _get_struct_B().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_get_struct_B().pack(self.state))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 1
      (self.state,) = _get_struct_B().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared Struct for a single uint32 ('I'), provided by genpy.
_struct_I = genpy.struct_I

def _get_struct_I():
  """Return the shared Struct used to (de)serialize a single uint32."""
  global _struct_I
  return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from turtlebot3_msgs/SetFollowStateResponse.msg. Do not edit."""
import sys
# True when running under Python 3 (hexversion >= 0x03000000).
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetFollowStateResponse(genpy.Message):
  """Response message for the turtlebot3_msgs/SetFollowState service.

  Carries a single uint8 ``result`` field; OK and ERROR are the message's
  named constants for that field.
  """
  _md5sum = "37065417175a2f4a49100bc798e5ee49"
  _type = "turtlebot3_msgs/SetFollowStateResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """
uint8 OK = 0
uint8 ERROR = 1
uint8 result
"""
  # Pseudo-constants
  OK = 0
  ERROR = 1

  __slots__ = ['result']
  _slot_types = ['uint8']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       result

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
    """
    if args or kwds:
      super(SetFollowStateResponse, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.result is None:
        self.result = 0
    else:
      self.result = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # The message body is a single packed uint8.
      buff.write(_get_struct_B().pack(self.result))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 1
      (self.result,) = _get_struct_B().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_get_struct_B().pack(self.result))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 1
      (self.result,) = _get_struct_B().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared Struct for a single uint32 ('I'), provided by genpy.
_struct_I = genpy.struct_I

def _get_struct_I():
  """Return the shared Struct used to (de)serialize a single uint32."""
  global _struct_I
  return _struct_I
# Lazily-created Struct for one unsigned byte ('<B').
_struct_B = None

def _get_struct_B():
  """Return the module-wide Struct for a single uint8, creating it on first use."""
  global _struct_B
  if _struct_B is None:
    _struct_B = struct.Struct("<B")
  return _struct_B
class SetFollowState(object):
  """Service descriptor binding the request and response message classes."""
  _type          = 'turtlebot3_msgs/SetFollowState'
  _md5sum = '6095eaec0ed61c547340fdc2200c8372'
  _request_class  = SetFollowStateRequest
  _response_class = SetFollowStateResponse
| [
"zhangxiaoqiao@DN51t4mt.SUNet"
] | zhangxiaoqiao@DN51t4mt.SUNet |
3fcf015d019f27b3041c1f8f1b06657a5c2712f3 | 13b97ffc3ba29414e19e0650682e2bfd5ce2b79a | /simplecharts.py | 37992a6518e0c74bdf60afe8c2fd065f9fcd4adb | [] | no_license | JamesFarrant/Personal-Projects | cf64918f8513cfe1f6d1fc57a661782383aa2d18 | 47309dc8b85208bbf9a517f334937eb43172b125 | refs/heads/master | 2021-01-25T05:35:32.516857 | 2017-05-29T16:11:10 | 2017-05-29T16:11:10 | 30,017,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | import matplotlib.pyplot as plt
# X and Y must be the same.
x = [1, 2, 3, 4, 5]
y = [4, 7, 4, 7, 3]
# Adding a new line
y2 = [5, 3, 2, 6, 2]
# Plotting the graph and adding labels to lines
plt.plot(x, y, label='Initial Line')
plt.plot(x, y2, label='New Line')
# Labelling our axes and adding title
plt.xlabel('Plot Number')
plt.ylabel('Random Numbers')
plt.title('My Awesome Graph')
# Legends are added here
plt.legend()
# This must be last to show everything.
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
5778000492192034131651c7d7d3a99d974dfc42 | 0b3f84f8891069bde05f896515bb93fb8dfda365 | /tasks/zaj8/qs.py | 72ae14a9c8263032c9a0db9c0cdcaa007e8a03f7 | [] | no_license | tybur/pwzn | 69e9377c71f88f200262804a818237e8e978c14b | 26f322451741c30aae00c43a7e021d27e03e441a | refs/heads/master | 2021-01-14T14:28:35.402781 | 2015-04-04T12:36:33 | 2015-04-04T12:36:33 | 26,272,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | def partition(list, start, end):
pivot = list[end] # Partition around the last value
bottom = start-1 # Start outside the area to be partitioned
top = end # Ditto
done = 0
while not done: # Until all elements are partitioned...
while not done: # Until we find an out of place element...
bottom = bottom+1 # ... move the bottom up.
if bottom == top: # If we hit the top...
done = 1 # ... we are done.
break
if list[bottom] > pivot: # Is the bottom out of place?
list[top] = list[bottom] # Then put it at the top...
break # ... and start searching from the top.
while not done: # Until we find an out of place element...
top = top-1 # ... move the top down.
if top == bottom: # If we hit the bottom...
done = 1 # ... we are done.
break
if list[top] < pivot: # Is the top out of place?
list[bottom] = list[top] # Then put it at the bottom...
break # ...and start searching from the bottom.
list[top] = pivot # Put the pivot in its place.
return top # Return the split point
def quicksort(list, start, end):
if start < end: # If there are two or more elements...
split = partition(list, start, end) # ... partition the sublist...
quicksort(list, start, split-1) # ... and sort both halves.
quicksort(list, split+1, end)
else:
return
| [
"mistrzenator@gmail.com"
] | mistrzenator@gmail.com |
4e3d52464d257688f122a23748edd43590043b89 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_24/models/network_interface_neighbor_capability.py | 76ae203e5847eb8a031597b8f2d39119f564eac0 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,245 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class NetworkInterfaceNeighborCapability(object):
    """
    Swagger-generated model for a neighbor capability (supported/enabled pair).

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'supported': 'bool',
        'enabled': 'bool'
    }

    attribute_map = {
        'supported': 'supported',
        'enabled': 'enabled'
    }

    required_args = {
    }

    def __init__(
        self,
        supported=None,  # type: bool
        enabled=None,  # type: bool
    ):
        """
        Keyword args:
            supported (bool): If true, this capability is supported by this neighbor; false otherwise.
            enabled (bool): If true, this capability is enabled by this neighbor; false otherwise.
        """
        # Only set the attributes that were explicitly provided; unset
        # attributes raise AttributeError via __getattribute__ below.
        if supported is not None:
            self.supported = supported
        if enabled is not None:
            self.enabled = enabled

    def __setattr__(self, key, value):
        # Reject attributes that are not part of the swagger definition.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Property placeholders mark fields that were never assigned.
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        # Dict-like view of the model's field names.
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(NetworkInterfaceNeighborCapability, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NetworkInterfaceNeighborCapability):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | noreply@github.com |
26c7a8acf057a32238bd83840d9c85a9892388da | 541cb3a837c7d4a10d880f96c346adead81628f0 | /drake_modules/imgui/__init__.py | 1c026c3c50bab88c87b8243b27d851ba25c6ab04 | [] | no_license | k0rm1d/engine | ef1e9669242f89b8466481e825d7ef0bc7000304 | 3094f94190240a6642303199fc854900485fdfaf | refs/heads/master | 2021-05-12T00:56:31.941950 | 2018-02-10T14:44:47 | 2018-02-10T14:44:47 | 117,546,460 | 0 | 0 | null | 2018-02-10T14:39:34 | 2018-01-15T13:06:40 | Python | UTF-8 | Python | false | false | 2,582 | py | from .. import Module
import drake
class imgui(Module):
    """Drake build module that downloads, extracts and builds Dear ImGui.

    Only the versions listed in EXPECTED are supported; each entry describes
    the files the release tarball must provide and the libraries produced
    per platform.
    """

    EXPECTED = {
        "1.53": {
            "headers": ["imgui.h"],
            "sources": ["imgui.cpp", "imgui_draw.cpp"],
            "libraries": {
                # NOTE(review): "libingui_draw.so" looks like a typo for
                # "libimgui_draw.so" — the `libs` property strips "lib" and
                # would yield "ingui_draw"; confirm before changing.
                "linux": ["libimgui.so", "libingui_draw.so"]
            },
            "others": []
        }
    }

    def __init__(self,
                 version,
                 base_url = None,
                 platform = "linux",
                 dest = drake.Path("imgui"),
                 expected_headers = None):
        """Declare the download/extract/compile rules for the given version.

        :param version: ImGui release version (must be a key of EXPECTED).
        :param base_url: archive base URL; defaults to the GitHub archive.
        :param platform: target platform name.
            NOTE(review): this parameter is ignored — self.__platform is
            hard-coded to "linux" below; confirm whether that is intended.
        :param dest: directory the tarball is downloaded and extracted into.
        :param expected_headers: unused.
        """
        super().__init__("imgui")
        self.__base_url = base_url or "https://github.com/ocornut/imgui/archive"
        self.__version = version
        self.__platform = "linux"
        self.__tar = drake.Node(dest / "v{version}.tar.gz".format(version = self.__version))
        self.__path = dest / "imgui-{version}".format(version = self.__version)
        self.__include_path = self.__path
        self.__source_path = self.__path
        self.__library_path = self.__path
        self.__cmake_lists = drake.Node(self.__path / "CMakeLists.txt")
        self.__makefile = drake.Node(self.__path / "Makefile")
        # Rule: fetch the release tarball.
        drake.HTTPDownload(url = self.url, dest = self.__tar)
        # Rule: extract only the headers and sources listed in EXPECTED.
        drake.Extractor(tarball = self.__tar,
                        targets = map(lambda f: str(f.name_absolute())[len(str(dest)) + 1:],
                                      self.headers + self.sources))
        # Rule: compile each source file into its own shared library.
        for index, cpp in enumerate(self.sources):
            drake.ShellCommand(
                sources = [cpp],
                targets = [self.libraries[index]],
                command = [
                    'g++', '-shared', '-fPIC', str(cpp.path().basename()), '-o',
                    str(self.libraries[index].path().basename())
                ],
                cwd = self.__path)

    @property
    def headers(self):
        """Header files expected for this version, as drake nodes."""
        return drake.nodes(*[self.__include_path / f for f in imgui.EXPECTED[self.__version]["headers"]])

    @property
    def sources(self):
        """Source files expected for this version, as drake nodes."""
        return drake.nodes(*[self.__source_path / f for f in imgui.EXPECTED[self.__version]["sources"]])

    @property
    def libraries(self):
        """Shared libraries expected for this version/platform, as drake nodes."""
        return drake.nodes(*[self.__library_path / f for f in imgui.EXPECTED[self.__version]["libraries"][self.__platform]])

    @property
    def libs(self):
        """Linker-style names: library basenames minus extension and "lib" prefix."""
        def clean(library):
            return str(library.path().basename().without_last_extension())[3:]
        return list(map(clean, self.libraries))

    @property
    def base_path(self):
        # NOTE(review): self.__base_path is never assigned in __init__, so
        # reading this property raises AttributeError — confirm intent.
        return self.__base_path

    @property
    def include_path(self):
        """Directory containing the extracted headers."""
        return self.__include_path

    @property
    def library_path(self):
        """Directory containing the built shared libraries."""
        return self.__library_path

    @property
    def url(self):
        """Full download URL of the release tarball."""
        return "{base_url}/v{version}.tar.gz".format(
            base_url = self.__base_url,
            version = self.__version,
        )
| [
"antony.mechin@docker.com"
] | antony.mechin@docker.com |
2455804a9deef4d3443589283af4dc9f1ef5c926 | dd8227454b817ccf2ceb24b3dfd4260d4ded7a72 | /scripts/item/consume_2435694.py | 020e445c8c1b62894419c308afa2bc358e797d3f | [
"MIT"
] | permissive | Snewmy/swordie | 0dd3c17808b064c2cb2bd9576b51daf01ae5d686 | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | refs/heads/master | 2023-06-30T21:14:05.225798 | 2021-07-06T14:32:39 | 2021-07-06T14:32:39 | 389,497,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | # Heroes Evan Damage Skin
success = sm.addDamageSkin(2435694)
if success:
sm.chat("The Heroes Evan Damage Skin has been added to your account's damage skin collection.")
| [
"vcalheirosdoc@gmail.com"
] | vcalheirosdoc@gmail.com |
b11400aaf4d36bbcbd0823c03cc8949e54aa522d | 020449fb757ec43f348f80f6d3cec93cc58933ed | /copy_db_poc.py | 8481f1407f7a76f65cb5a43cd50b21e243ea0301 | [] | no_license | Lethgir/copy-db-poc | a1818ecd9bc8c09a5ac83112968e22c40af6f316 | 8c519be132508fd037ad038ed1ffab5106001216 | refs/heads/main | 2023-08-20T05:50:54.106776 | 2021-10-14T10:24:22 | 2021-10-14T10:24:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,899 | py | #!/usr/bin/env python3
import argparse
import copy
import sys
import traceback
from uuid import uuid4
from typing import cast
import sqlalchemy
from sqlalchemy import create_engine, select, func, event
from sqlalchemy import Table, Column, Integer, String, MetaData
# Prefix applied to every table created in the destination database.
TABLE_PREFIX = "dbin_"
# Default row batch size for copies (currently unused by copy_table).
DEFAULT_BATCH_SIZE = 1000

# NOTE(review): credentials are hard-coded for this proof of concept; do not
# reuse this pattern outside local experiments.
in_engine = create_engine(
    "postgresql+psycopg2://user:password@127.0.0.1:5432/dbin", echo=True
)
out_engine = create_engine(
    "mysql+mysqldb://user:password@127.0.0.1:3306/dbout", echo=True
)
def setup_fixtures():
    """Drop and re-create the source ``users`` table, then insert one row.

    Side effects only: talks to the Postgres engine defined at module level.
    """
    metadata_in = MetaData()
    users = Table(
        "users",
        metadata_in,
        Column(
            "id",
            sqlalchemy.dialects.postgresql.UUID(as_uuid=True),
            primary_key=True,
            default=uuid4,
        ),
        Column("num", Integer),
        Column("full_name", String),
    )
    # Start from a clean slate on every run.
    metadata_in.drop_all(in_engine)
    metadata_in.create_all(in_engine)
    ins = users.insert().values(num=2, full_name="Louis de Funès")
    conn = in_engine.connect()
    conn.execute(ins)
    conn.close()
def get_generic_type(type):
    """Map a dialect-specific SQLAlchemy column type to a portable generic one.

    Postgres UUID columns become 36-character strings; unbounded strings get
    an explicit length (MySQL requires one).  Types that cannot be
    genericized are returned unchanged, with the traceback printed for
    visibility (deliberate best effort).
    """
    if isinstance(type, sqlalchemy.dialects.postgresql.UUID):
        return String(length=36)
    try:
        new = type.as_generic()
        if isinstance(new, String) and not new.length:
            # For MySQL
            new.length = 500
        return new
    except NotImplementedError:
        traceback.print_exc()
        return type
def copy_table(
    table: Table,
    batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
    """Copy a table.

    The destination table is named TABLE_PREFIX + source name, is dropped and
    re-created first, and carries no constraints.

    :param table: the reflected source table.
    :param batch_size: currently unused; rows are inserted one at a time
        (see the TODO below).
    """
    # NOTE(review): copy.copy shares column objects with the source table and
    # mutating .name/.constraints on a shallow copy is fragile — compare with
    # Table.to_metadata for a safer clone.
    out_table = copy.copy(table)
    out_table.name = f"{TABLE_PREFIX}{table.name}"
    # Do not copy constraints
    out_table.constraints = set([])
    out_table.drop(out_engine, checkfirst=True)
    out_table.create(out_engine)
    with in_engine.connect() as conn_in:
        with out_engine.connect() as conn_out:
            stmt = select(table)
            # stream_results does not work for all db dialects
            for r in conn_in.execution_options(stream_results=True).execute(stmt):
                # TODO: could use batched queries with bound params, see
                # sqlalchemy's doc
                ins = out_table.insert().values(**r)
                conn_out.execute(ins)
def copy_db():
    """Copy the db to its destination.

    Reflects every table from in_engine (genericizing column types so the
    MySQL target can represent them) and copies them one by one, iterating
    in reverse dependency order.
    """
    metadata = MetaData()

    @event.listens_for(metadata, "column_reflect")
    def genericize_datatypes(inspector, tablename, column_dict):
        # Rewrite each reflected column type into its generic equivalent.
        previously = column_dict["type"]
        column_dict["type"] = get_generic_type(previously)

    metadata.reflect(bind=in_engine)
    for t in reversed(metadata.sorted_tables):
        copy_table(t)
def main() -> int:
    """Create the fixtures, copy the database, and return an exit code (0)."""
    setup_fixtures()
    copy_db()
    return 0


if __name__ == "__main__":
    # argparse is used only for --help/usage; the script takes no arguments.
    parser = argparse.ArgumentParser(description="copy db over")
    args = parser.parse_args()
    sys.exit(main())
| [
"120501+charlax@users.noreply.github.com"
] | 120501+charlax@users.noreply.github.com |
12caf078872a5634ca4638aed6dbdbd7776b5062 | 6097031d8e85400214085f152164a29346d106e3 | /maxheap.py | 7e3f269c4b2365d3684fe48cdb32ec815206f9cd | [] | no_license | ekourkchi/GalaxyGroups | 2fccca4998850c0838d0c7ef949bba8b1267716a | 19e98da0015b0462133133a23915e6d633614ad3 | refs/heads/master | 2022-04-03T09:30:19.667796 | 2020-02-13T03:05:48 | 2020-02-13T03:05:48 | 112,898,380 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,137 | py | #!/home/ehsan/Ureka/Ureka/variants/common/bin/python
import numpy as np
from math import *
from copy import *
class heapNode:
    """A single heap entry: an orderable key, an opaque ID, and a flag bit."""

    def __init__(self, key, ID):
        self.key = key
        self.ID = ID
        self.flag = False

    def toString(self):
        # %s formatting keeps the printed output identical on Python 2 and 3.
        print("%s %s %s" % (self.key, self.ID, self.flag))


# *********************************************
class maxHeap:
    """Array-backed binary max-heap of heapNode entries.

    The node with the largest key is always at index 0.  Nodes carry an
    opaque ID so callers can remove entries by ID (see remove()).

    Fixes over the original: print statements and integer division are now
    Python 2/3 compatible, and remove() re-establishes the heap invariant
    correctly (the old code only sifted the relocated last element
    downwards, so a replacement larger than its new parent could silently
    break the heap order).
    """

    def __init__(self):
        self.size = 0    # number of live entries in self.array
        self.array = []  # heap storage; parent i has children 2i+1 and 2i+2

    # *****************
    def push(self, key, ID):
        """Insert a new (key, ID) entry and restore the heap property.

        Returns 0 (kept for backward compatibility with existing callers).
        """
        self.array.append(heapNode(key, ID))
        self.size += 1
        self._siftUp(self.size - 1)
        return 0

    # *****************
    def lrmax(self, left, right):
        """Return the index of the larger existing child, or 0 if neither
        index lies inside the heap.

        0 is an unambiguous "no child" sentinel because the root can never
        be anyone's child.
        """
        if right <= self.size - 1:
            if self.array[left].key >= self.array[right].key:
                return left
            return right
        if left <= self.size - 1:
            return left
        return 0

    # *****************
    def _siftUp(self, child):
        """Bubble the entry at `child` up until its parent's key is >= it."""
        while child > 0:
            parent = (child + 1) // 2 - 1  # // keeps this an int on Python 3
            if self.array[child].key > self.array[parent].key:
                self.array[parent], self.array[child] = \
                    self.array[child], self.array[parent]
                child = parent
            else:
                break

    # *****************
    def _siftDown(self, parent):
        """Push the entry at `parent` down until both children are <= it."""
        while True:
            child = self.lrmax(2 * parent + 1, 2 * parent + 2)
            if child == 0 or self.array[child].key < self.array[parent].key:
                break
            self.array[parent], self.array[child] = \
                self.array[child], self.array[parent]
            parent = child

    # *****************
    def pop(self):
        """Remove and return the entry with the largest key (None if empty)."""
        if self.size == 0:
            print("\n[Error] No elements in the mean Heap ...\n")
            return None
        top = self.array[0]
        last = self.array.pop(self.size - 1)
        self.size -= 1
        if self.size > 0:
            # Move the last entry to the root and re-establish the invariant.
            self.array[0] = last
            self._siftDown(0)
        return top

    # *****************
    def setFlag(self, key):
        """Set .flag on every entry whose key equals `key`.

        Returns False when the heap is empty, otherwise None (historical
        behaviour preserved).
        """
        if self.size == 0:
            print("\n[Error] No elements in the mean Heap ...\n")
            return False
        for i in range(self.size):
            if self.array[i].key == key:
                self.array[i].flag = True

    # *****************
    def peek(self):
        """Return (without removing) the entry with the largest key."""
        if self.size == 0:
            print("\n[Error] No elements in the mean Heap ...\n")
            return None
        return self.array[0]

    # *****************
    def remove(self, ID):
        """Remove every heap entry whose ID equals the given ID.

        Returns the number of removed elements.  Implemented as a filter
        followed by a bottom-up re-heapify: O(n) and, unlike the old
        sift-down-only in-place removal, always leaves a valid max-heap.
        """
        if self.size == 0:
            return 0
        kept = [node for node in self.array[:self.size] if node.ID != ID]
        removed = self.size - len(kept)
        if removed:
            self.array = kept
            self.size = len(kept)
            # Bottom-up heapify: sift down every internal node.
            for parent in range(self.size // 2 - 1, -1, -1):
                self._siftDown(parent)
        return removed

    # *****************
    def Size(self):
        """Return the number of entries currently stored."""
        return self.size

    # *****************
    def toString(self):
        """Print every entry (debugging aid)."""
        for i in range(self.size):
            self.array[i].toString()
# *********************************************
# *********************************************
if __name__ == '__main__':
    # Ad-hoc demo (note the Python 2 print syntax): push a few (key, ID)
    # pairs, remove the entry with ID "e5", then drain the heap, which
    # prints the remaining entries in descending key order.
    myHeap = maxHeap()
    myHeap.push(4, "e4")
    myHeap.push(7, "e7")
    myHeap.push(2, "e2")
    myHeap.push(6, "e6")
    myHeap.push(8, "e7")
    myHeap.push(5, "e5")
    myHeap.push(3, "e7")
    print "\n", myHeap.Size()
    print myHeap.remove("e5")
    print "\n", myHeap.Size()
    while myHeap.Size()>0:
        myHeap.pop().toString()
        #print myHeap.peek().key
| [
"ekourkchi@gmail.com"
] | ekourkchi@gmail.com |
e152459956c322f9cbc51869cd7582e46c883a4f | ca77040a488fea5410449a6ae06693c64c0d3f1a | /web/ui/views/views.py | 8bf22427e4bbe80ad2a7a40c428831f5b4e2c697 | [] | no_license | patricknevindwyer/space-base-otter | 0565412db484b5bb8b2dbc4e8d3878fbd73f7b0b | ea7dd7649c172ca3e4c14b6df8b6715f55f746ba | refs/heads/master | 2021-01-19T13:42:53.229964 | 2017-08-19T21:50:45 | 2017-08-19T21:50:45 | 82,412,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, "ping.html") | [
"patricknevindwyer@gmail.com"
] | patricknevindwyer@gmail.com |
16afc1318862a41cd8919b74e00d65bc4f42433a | 94314048472ca6db91c90261c84cd364c6e0f703 | /python-ml-projects/main.py | 45fe293410e55ac248d8528c6f14a7b912dee678 | [] | no_license | okeefem2/python-learning | b9dae792098aba24da19a33fb55302e3cec207a9 | 2f5f5e5805e26aa185eddacc4a21adb402e000cd | refs/heads/main | 2023-04-08T07:02:20.223769 | 2023-03-20T03:17:00 | 2023-03-20T03:17:00 | 337,278,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | from boston_housing import create_and_train_model
if __name__ == '__main__':
create_and_train_model()
| [
"okeefem355@gmail.com"
] | okeefem355@gmail.com |
566300c87730df77107c54db2fe5c86457bae7eb | fbbd0e93effba9478cbfcd99b0795f2cfdc3e394 | /quizsite/account/migrations/0017_delete_customuser.py | dfe5261added4c56367badde5583599fd4e95184 | [] | no_license | Sk-Md-Afridi/DjangoProject | 6a514e9a038d9b28df464e8b3b00ee2446ef89c9 | 98a2fefac05599df9899c3b7695c5c372a650306 | refs/heads/master | 2023-06-17T09:38:48.883796 | 2021-07-19T19:23:18 | 2021-07-19T19:23:18 | 383,184,012 | 1 | 1 | null | 2021-07-13T18:03:24 | 2021-07-05T15:20:05 | null | UTF-8 | Python | false | false | 299 | py | # Generated by Django 3.1.3 on 2021-07-14 13:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0016_auto_20210714_1913'),
]
operations = [
migrations.DeleteModel(
name='CustomUser',
),
]
| [
"skmdafridi1@gmail.com"
] | skmdafridi1@gmail.com |
2ecaa0902b36455da6e55c02523cefe6bcec5bfc | e5f4c22bfae93d3d96dea1b0ed8f3e4df373243f | /test.py | f3a74709481a1e1e55a6bdc81b7b3e3e0cf3f866 | [] | no_license | MrLokans/discover_flask | 5925a2ab07480398543d51e33c8be2cf23b2c36b | 63f847409dd67725bdef754cd0041f2647dabf4e | refs/heads/master | 2021-01-10T16:25:21.767911 | 2016-03-07T05:44:17 | 2016-03-07T05:44:17 | 52,816,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,919 | py | import unittest
from app import app
class AppTestCase(unittest.TestCase):
def setUp(self):
self.tester = app.test_client(self)
def login(self, username, password, follow_redirects=True):
return self.tester.post('/login',
data={'username': username,
'password': password},
follow_redirects=follow_redirects)
def logout(self):
return self.tester.get('/logout', follow_redirects=True)
def correctly_login(self, follow_redirects=True):
return self.login('admin', 'password', follow_redirects)
def test_index(self):
response = self.tester.get('/login', content_type='html/text')
self.assertEqual(response.status_code, 200)
def test_login_page_is_loaded(self):
response = self.tester.get('/login', content_type='html/text')
self.assertEqual(response.status_code, 200)
self.assertIn('Please login', response.data.decode('utf-8'))
def test_login_process_behaves_correctly_with_correct_creds(self):
response = self.correctly_login()
self.assertIn('Successfully logged in', response.data.decode('utf-8'))
def test_login_process_behaves_correctly_with_incorrect_creds(self):
response = self.login('incorrectuser', 'incorrectpassword')
self.assertIn('Invalid username', response.data.decode('utf-8'))
def test_logout_works(self):
response = self.correctly_login()
response = self.logout()
self.assertIn('Logged out.', response.data.decode('utf-8'))
def test_main_page_requires_user_being_logged_in(self):
response = self.tester.get('/', content_type='html/text',
follow_redirects=True)
self.assertIn('Login required', response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
| [
"trikster1911@gmail.com"
] | trikster1911@gmail.com |
61d8da39d048a90ac0ae92b41a27b470e2e61157 | c583812a57f993733a566bd64ec141654e52d098 | /srcpy/sim/correlators/corrCUDA.py | 7ee13584d732d1a07d0daf5ae5d27994b363a4e7 | [] | no_license | petrbojda/NavSet_unob | f1d542c7a8aba58113c67a65c26269c72f503c0f | 6d30876d5956b4c15dbc8e5093b18fe3194f7c59 | refs/heads/master | 2021-01-23T04:28:38.762219 | 2017-09-05T12:53:16 | 2017-09-05T12:53:16 | 92,927,644 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py |
import numpy as np
import accelerate.cuda.blas as blas
import accelerate.cuda.fft as ft
from numba import cuda
def corr_td_single (x1,x2):
c_12 = blas.dot(x1,x2)
return c_12
def best_grid_size(size, tpb):
bpg = np.ceil(np.array(size, dtype=np.float) / tpb).astype(np.int).tolist()
return tuple(bpg)
@cuda.jit('void(float32[:], float32[:])')
def mult_inplace(img, resp):
i = cuda.grid(1)
img[i] *= resp[i]
def corr_FD(x1,x2):
threadperblock = 32, 8
blockpergrid = best_grid_size(tuple(reversed(x1.shape)), threadperblock)
print('kernel config: %s x %s' % (blockpergrid, threadperblock))
# Trigger initialization the cuFFT system.
# This takes significant time for small dataset.
# We should not be including the time wasted here
#ft.FFTPlan(shape=x1.shape, itype=np.float32, otype=np.complex64)
X1 = x1.astype(np.float32)
X2 = x2.astype(np.float32)
stream1 = cuda.stream()
stream2 = cuda.stream()
fftplan1 = ft.FFTPlan(shape=x1.shape, itype=np.float32,
otype=np.complex64, stream=stream1)
fftplan2 = ft.FFTPlan(shape=x2.shape, itype=np.float32,
otype=np.complex64, stream=stream2)
# pagelock memory
with cuda.pinned(X1, X2):
# We can overlap the transfer of response_complex with the forward FFT
# on image_complex.
d_X1 = cuda.to_device(X1, stream=stream1)
d_X2 = cuda.to_device(X2, stream=stream2)
fftplan1.forward(d_X1, out=d_X1)
fftplan2.forward(d_X2, out=d_X2)
print ('d_X1 is ',np.shape(d_X1),type(d_X1),np.max(d_X1))
print ('d_X2 is ',np.shape(d_X2),type(d_X2),np.max(d_X2))
stream2.synchronize()
mult_inplace[blockpergrid, threadperblock, stream1](d_X1, d_X2)
fftplan1.inverse(d_X1, out=d_X1)
# implicitly synchronizes the streams
c = d_X1.copy_to_host().real / np.prod(x1.shape)
return c
| [
"petr.bojda@email.cz"
] | petr.bojda@email.cz |
c0d562717f5f884fa6080b1eab26a32a8dc4d8f5 | aae4175584f1402696b22b6f695dcb53faf85d7c | /photo_organize/__init__.py | 855991094fc8753944177b001ea25e844c2163b1 | [] | no_license | rthardin/photo_manager | e9ad5432c7795ad2ebf5ff60077ed807ef232961 | 605e4d68d489aa619e0c74d10bbacc725fed0fbb | refs/heads/master | 2021-01-10T04:33:53.791792 | 2017-01-08T03:29:34 | 2017-01-08T03:29:34 | 50,002,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | __author__ = 'ryan-mbp'
| [
"ryan.t.hardin@gmail.com"
] | ryan.t.hardin@gmail.com |
7fbac519e7757dd9ec46321b52be8cec7d80d07a | f8614b19a8231e11a17dcdc2e3ff527bada0bf34 | /Lab Exercise 9.9.2021/problem5.py | 80f2b6023878b5bdce608fc0ca1e4dafa1a03f4b | [] | no_license | GOConnell04/Python-2021 | eff508db57884d383c296ba77f9b119f2fa35ce7 | bafd696ab1f11b22b51f4b06629d60400bf56e48 | refs/heads/main | 2023-08-05T01:59:36.580414 | 2021-09-25T13:39:15 | 2021-09-25T13:39:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | ## Lab Exercise 9/9/2021 Problem 5
## Author:
## Converts inches to feet and inches
#get height from user, convert to int data type and store in height
#Add code here
#calculate feet and store in variable feet (use integer division)
#Add code here
#calculate inches and store in variable inches (use remainder operator)
#Add code here
#print result
print("Your height is", feet, "feet", inches, "inches")
## Output
## Enter your height in inches: 73
## Your height is 6 feet 1 inches
| [
"noreply@github.com"
] | noreply@github.com |
a145346bc456c2281fad96365f8d9a5af1f4cd7d | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/sets_20200609191149.py | 89a07ef2d3f4c49463319f9699998f9dd296f2fc | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | import json
def Strings(str):
# dictionary--> key value pairs
values = {}
newArray = []
keys = []
for i in str:
newArray.append(i.split(":"))
for j in range(0,len(newArray)):
if newArray[j][0] in values:
values[j][0] =
# if newArray[j][0] in values:
# values[newArray[j][0]] += int(newArray[j][1])
# else:
# values[newArray[j][0]] = int(newArray[j][1])
# for k in values:
# keys.append(k)
# keys = sorted(keys)
# newString = ""
# last =len(keys)-1
# lastString = ""
# lastString +=keys[last] + ":" + json.dumps(values[keys[last]])
# for i in range(len(keys)-1):
# if keys[i] in values:
# newString += keys[i] + ":"+ json.dumps(values[keys[i]])+","
# finalString = newString + lastString
# print(type(finalString))
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
# "B:5,C:3,Z:5"
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
462c27d14b829bf7308de41e30694fe80d248358 | c22f3b1b0ecfd28101be280ecbad037fe9f2196a | /boagent/utils/MyGO.py | d85930d671de6e47a1bc58772a7d95162ff08774 | [] | no_license | 5atouristspot/Botasky | ab3625da4088afeca159e9eddaae6263e99064ab | 996b1e83cf6f3d2eb6ab726d2d3ee252faed91a7 | refs/heads/master | 2020-03-16T15:08:58.475593 | 2019-04-10T02:55:36 | 2019-04-10T02:55:36 | 132,729,895 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,852 | py | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
Created on 2017-4-05
@module: MyGO
@used: ssh to server
"""
import paramiko
from MyLOG import MyLog
from botasky.utils.MyFILE import project_abdir, recursiveSearchFile
logConfig = recursiveSearchFile(project_abdir, '*logConfig.ini')[0]
mylog = MyLog(logConfig, 'MyGO.py')
logger = mylog.outputLog()
__all__ = ['MyMiko']
__author__ = 'zhihao'
class MyMiko():
"""
used : go to server ,to execute cmd
"""
def __init__(self, ip_domain, port, config):
"""
used : init config and get value
:param ip_domain : target ip or domain
:param port : target port
:param config : paramikoconfig
"""
try:
self.ip_domain = ip_domain
self.port = port
self.config = config
init_info = "[action]:MyMiko init" \
"[status]:OK" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}" \
"[config]:{config}".format(ip_domain=self.ip_domain, port=self.port, config=self.config)
logger.info(init_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:MyMiko init" \
"[status]:FAIL" \
"[Errorcode]:{e}" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}" \
"[config]:{config}".format(ip_domain=self.ip_domain, port=self.port, config=self.config,
e=e)
logger.error(error_msg)
def go(self):
"""
used : go to server
"""
username = self.config['username']
password = self.config['password']
key_file = self.config['key_file']
paramiko_log = recursiveSearchFile(project_abdir, '*paramiko.log')[0]
paramiko.util.log_to_file(paramiko_log)
s = paramiko.SSHClient()
s.load_system_host_keys()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#go to server
try:
if key_file == '' and (username != '' and password != ''):
s.connect(self.ip_domain, self.port, username, password)
elif key_file != '':
key = paramiko.RSAKey.from_private_key_file(key_file)
s.connect(self.ip_domain, self.port, username, pkey=key)
else:
error_msg = "[action]:get paramikoconfig " \
"[status]:FAIL" \
"[Errorcode]:paramikoconfig error" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}" \
"[username]:{username}" \
"[password]:{password}" \
"[key_file]:{key_file}".format(ip_domain=self.ip_domain, port=self.port,
username=username, password=password, key_file=key_file)
logger.error(error_msg)
return 'paramikoconfig error'
exec_info = "[action]:go to server" \
"[status]:OK" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}".format(ip_domain=self.ip_domain, port=self.port)
logger.info(exec_info)
return s
except Exception, e:
error_msg = "[action]:go to server" \
"[status]:FAIL" \
"[Errorcode]:{e}" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}".format(ip_domain=self.ip_domain, port=self.port, e=e)
logger.info(error_msg)
def exec_cmd(self, go_init, cmd):
"""
used : to execute cmd
:param go_init : instance of paramiko ssh agent
:param cmd : executable cmd
"""
# execute cmd
try:
stdin, stdout, stderr = go_init.exec_command(cmd)
done_flag = stdout.channel.recv_exit_status()
stdout_info = stdout.read()
go_init.close()
if done_flag == 0:
# return normal info
exec_info = "[action]:execute cmd" \
"[status]:OK" \
"[done_flag]:{done_flag}" \
"[stdout]:{stdout}" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}" \
"[cmd]:{cmd}".format(ip_domain=self.ip_domain, port=self.port, stdout=stdout_info,
done_flag=done_flag,cmd=cmd)
logger.info(exec_info)
return done_flag, stdout_info
else:
error_msg = "[action]:execute cmd" \
"[status]:FAIL" \
"[done_flag]:{done_flag}" \
"[stdout]:{stdout}" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}" \
"[cmd]:{cmd}".format(ip_domain=self.ip_domain, port=self.port, stdout=stdout_info,
done_flag=done_flag,cmd=cmd)
logger.error(error_msg)
return done_flag, stdout_info
except Exception, e:
error_msg = "[action]:execute cmd" \
"[status]:FAIL" \
"[Errorcode]:{e}" \
"[stderr]:{stderr}" \
"[ip_domain]:{ip_domain}" \
"[port]:{port}" \
"[cmd]:{cmd}".format(ip_domain=self.ip_domain, port=self.port,
cmd=cmd, e=e, stderr=stderr.read())
logger.error(error_msg)
return 2, 'exec_cmd error'
if __name__ == '__main__':
paramikoconfig = {'username': 'root',
'password': 'tfkj705',
'key_file': ''}
miko = MyMiko('192.168.41.40', 22, paramikoconfig)
#print miko.go()
print 'xxxxxxx', miko.exec_cmd(miko.go(), 'mkdir /zhiha/test_paramiko6')
#print 'xxxxxxx', miko.exec_cmd(miko.go(), 'cd /zhihao && ls -l udate*')
#(0,'text') --> OK
#(1,)--> bad (mistake cmd)
#(2,)--> bad (no file)
'''
print MyMiko('192.168.41.40', 22, paramikoconfig).__class__
print MyMiko('192.168.41.40', 22, paramikoconfig).__dict__
'''
#import MyGO
#help(MyGO)
| [
"1204207658@qq.com"
] | 1204207658@qq.com |
7dcdce9e010882ff0ab3f5e4f31ec84cfbf17a7b | 6721094f2fc776faf22c997bafd409f0f65a3594 | /23.py | 2a0c382ac340ed6ca1e0957aa2f71274ec4afb5e | [] | no_license | shiro-sura/homework2 | 63968e8acf1ef65cd161cc36e7d853722cde4aec | 062b903b40c3fe3c186bc7b9deb21e90e413e880 | refs/heads/master | 2020-08-04T19:52:00.925671 | 2019-10-04T14:25:56 | 2019-10-04T14:25:56 | 212,260,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | x=float(input("请输入摄氏度"))
f=x*33.8
print("华氏度:",f,"F")
| [
"209322813@qq.com"
] | 209322813@qq.com |
5628a03d0676455161dcd1c5b40a6e2fb286b378 | fcd85b221c4bcbd03a65e6a48100909dd89e1622 | /math_functions/triangle_func.py | c7cde2a144f15efbfe31259a51184e7d27ea9904 | [] | no_license | FireHo57/Python_Euler | f1d171b9279bc1428ef13e57e2a4b48babc405f1 | a31c2faee2ef42f054debd2ab31d2b019360d618 | refs/heads/master | 2021-09-14T17:37:16.813217 | 2018-05-16T19:49:02 | 2018-05-16T19:49:02 | 104,935,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | def get_nth_triangle(limit):
return sum( range(1,limit) )
def get_n_triangles(limit):
triangles=[]
total=0
for x in range(1, limit+1):
total+=x
triangles.append(total)
return triangles
if __name__=="__main__":
print('Nth triangle number: {}'.format(get_nth_triangle(10)))
print('First N triangle numbers: {}'.format(get_n_triangles(10)))
| [
"charlie_haddock@hotmail.co.uk"
] | charlie_haddock@hotmail.co.uk |
dfdade890a56c19a323dbc2320418057e09976e0 | 7228a01927243ff5049e44bb405bdc74d9fca2a2 | /week-03/day-2/Projects/f38.py | 3b674c09b83905a76d75d00ba4017db07494f2f5 | [] | no_license | greenfox-velox/attilakrupl | 76a61d0b2d7c014b7068b5066bcfd797d0f77a99 | 2bd567c38eff62f8b44f1d88507394ae13d61fa3 | refs/heads/master | 2021-01-17T12:38:33.374452 | 2016-10-05T14:01:51 | 2016-10-05T14:01:51 | 58,042,849 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | numbers = [7, 5, 8, -1, 2]
def own_min(nums):
a = nums[0]
for i in nums:
if i < a:
a = i
else:
pass
print (a)
own_min(numbers) | [
"krupl.attila@gmail.com"
] | krupl.attila@gmail.com |
82a5c3789f9497439c17e5f06dbdcd28f55a29eb | 41b5734f0cbda8b16ff96538cda95ec3f4d92649 | /bin/add_video_tags.py | 8ffad81f71f1b40437c79ba734ac4244bd0b2181 | [
"Apache-2.0"
] | permissive | pkolachi/acl-anthology | e255b0d118a17fd6ad092718ecd576628b518682 | e6e71f88c9894f49deaf128e295bfd543669bafc | refs/heads/master | 2021-12-21T15:44:01.520790 | 2021-12-19T01:07:20 | 2021-12-19T01:07:20 | 196,833,109 | 0 | 0 | null | 2019-07-14T12:20:42 | 2019-07-14T12:20:42 | null | UTF-8 | Python | false | false | 3,357 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Namratha Urs <namrathaurs@my.unt.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to add video tags to the Anthology towards ingestion of videos.
Usage:
add_video_tags.py TSV_files
where TSV_files are the tab-separated values (TSV) files containing the tuples (anthology_id, presentation_id)
Consolidates all the TSV files passed to the script, edits the XML by adding a properly-indented video tag to the
end of the <paper> element and rewrites the XML.
"""
import pandas as pd
import os
import lxml.etree as et
import argparse
from anthology.utils import deconstruct_anthology_id, make_simple_element, indent
root = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(root, "../data/xml")
def combine_tsv(files):
combined_df = pd.concat(
[pd.read_csv(fname, keep_default_na=False, sep="\t") for fname in files]
)
combined_df = combined_df[
(combined_df.anthology_id != "") & (combined_df.anthology_id != "nan")
]
return combined_df
def split_anth_id(anth_id):
coll_id, _, _ = deconstruct_anthology_id(anth_id)
return coll_id
def add_video_tag(anth_paper, xml_parse):
coll_id, vol_id, paper_id = deconstruct_anthology_id(anth_paper.anthology_id)
paper = xml_parse.find(f'./volume[@id="{vol_id}"]/paper[@id="{paper_id}"]')
if anth_paper.presentation_id.startswith("http"):
video_url = anth_paper.presentation_id
else:
video_url = "https://slideslive.com/{}".format(anth_paper.presentation_id)
make_simple_element("video", attrib={"tag": "video", "href": video_url}, parent=paper)
def main(args):
combo_df = combine_tsv(args['tsv_files'])
combo_df_uniques = combo_df['anthology_id'].apply(split_anth_id).unique()
for xml in os.listdir(data_dir):
fname, ext = os.path.splitext(xml)
if fname in combo_df_uniques.tolist() or fname == "2020.acl":
tree = et.parse(os.path.join(data_dir, xml))
df_subset = combo_df[combo_df['anthology_id'].str.startswith(fname)]
df_subset.apply(add_video_tag, axis=1, xml_parse=tree)
with open(os.path.join(data_dir, fname + ".xml"), 'wb') as f:
indent(tree.getroot())
tree.write(f, encoding="UTF-8", xml_declaration=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Adds video tags to the anthology XML.')
parser.add_argument(
'tsv_files',
nargs='+',
help='Two-column TSV containing (anthology_id, presentation_id)',
)
cl_args = parser.parse_args()
if cl_args == 0:
parser.print_help()
else:
main(
vars(cl_args)
) # vars converts the argparse's Namespace object to a dictionary
| [
"noreply@github.com"
] | noreply@github.com |
bcd4424dd7009abacf5f297ef813f49979cc065d | d79f9ffc7f591e68ea8d21c77779067826d56e99 | /broj priloga u korpusu_digrami.py | 59c936816aa170a4f913b494c0b48e96857b5638 | [] | no_license | sarabarac/Racunarska-lingvistika | d71d85feb9ff689bdde76f7b141e9b47d77f1259 | 93bdc70f459401061124beb6b04ece10cddde723 | refs/heads/master | 2021-07-01T16:23:03.612244 | 2018-07-10T06:33:51 | 2018-07-10T06:33:51 | 95,882,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | # -*- coding: utf-8 -*-
brojPriloga = 0
prilozi = ["ovde", "tamo", "tu", "desno", "negde"]
korpus = "Ovde mi je baš lepo. Džunglu ne volim. Ne bih volela da sam tamo. Moje mesto je tu. Tu se osećam kao da sam kod kuće. Stavila sam svoju šoljicu kafe desno, e baš tu."
tokeniziraniKorpus = korpus.split(" ")
for reč in tokeniziraniKorpus:
# prebacivanje reči u mala slova
reč = reč.lower()
# uklanjanje interpunkcijskih znaka
reč = reč.strip(",.")
if reč in prilozi:
brojPriloga += 1 # brojPriloga = brojPriloga + 1
if reč in "prilozi":
brojPriloga =+ 1
print(brojPriloga)
korpus = "Ovaj korpus sam sastavljala dok je Isidora sedela u kancelariji i puštala pesme benda The National. Pitala me je da li mi se bend sviđa i rekla je da ona misli da je to super muzika za preko dana. Pošto bukvalno više nemam ideja šta da kucam ovde, nastaviću da ispisujem šta se sve nalazi oko mene. Levo od mene su dva tanjira koje je Milena oprala. Deluju čisto. S leve strane računara nalazi se tirkizni lak čije je ime u stvari Green. Nije mi baš najjasnije ko tu ne vidi boje. S desne strane računara nalazi se providni lak, ali je bočica obojena u crno, pa je Tijana zbog toga prošlog seminara mislila da je crn. Nejasni su mi proizvođači lakova. "
korpus = korpus.lower()
digrami = list()
for i in range(len(korpus)):
digram = korpus[i:i+2]
digrami.append(digram)
print(digrami)
print(set(digrami))
ttr = len(set(digrami))/len(digrami)
print(ttr)
| [
"noreply@github.com"
] | noreply@github.com |
d7804221cd2a32518fecd25e6afce317a715027d | 09ae2652bbb0bc184757739bab936c0e367085a5 | /crypto_cli/chiper/full_vigenere.py | 86022bc6ffd732853cb4ccca63b0cf1167c43c3f | [] | no_license | ridho9/crypto-cli | 1b6d989e7c1a61a165de1c1b665cd4e332854122 | 8e3b59a83586b911baf8a1995e0da1105e430047 | refs/heads/master | 2020-12-18T21:33:37.363725 | 2020-01-27T08:59:45 | 2020-01-27T08:59:45 | 235,106,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | import click
from crypto_cli.util.char import lalpha_ord, is_int_alpha
class FullVigenereChiper:
def __init__(self, square, key):
self.square = square
self.key = key
def encode(self, plain: bytes) -> bytes:
result = []
key_idx = 0
key_len = len(self.key)
for c in plain:
if is_int_alpha(c):
c_ord = lalpha_ord(c)
k_ord = lalpha_ord(self.key[key_idx])
res_ord = self.square[c_ord][k_ord]
result.append(res_ord)
key_idx = (key_idx + 1) % key_len
else:
result.append(c)
return bytes(result)
def decode(self, chiper: bytes) -> bytes:
result = []
key_idx = 0
key_len = len(self.key)
for c in chiper:
if is_int_alpha(c):
c_ord = lalpha_ord(c)
k_ord = lalpha_ord(self.key[key_idx])
for row_num in range(len(self.square)):
row = self.square[row_num]
res_ord = row[k_ord] - ord("a")
if res_ord == c_ord:
break
result.append(row_num + ord("a"))
key_idx = (key_idx + 1) % key_len
else:
result.append(c)
return bytes(result)
@staticmethod
@click.command("full_vigenere", help="Full Vigenere Chiper")
@click.argument("square", type=click.File("rb", lazy=True))
@click.argument("key", type=click.STRING)
def command(square, key):
def processor(ctx):
chiper = FullVigenereChiper(square.readlines(), key.encode())
ctx["chipers"].append(chiper)
return ctx
return processor
if __name__ == "__main__":
with open("key.txt", "rb") as f:
square = f.readlines()
chiper = FullVigenereChiper(square, b"abcde")
plaintext = b"the quick brown fox jumps over the lazy dog"
chipertext = chiper.encode(plaintext)
print(f"{chipertext=}")
undechiper = chiper.decode(chipertext)
print(f"{undechiper=}")
| [
"p.ridho@yahoo.co.id"
] | p.ridho@yahoo.co.id |
f77ffc69cb16459c8138b3e8578323ac411365e2 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/5644d97cc5014e18b14799feeb9b354d528a6489-<test_invalid_interfaces>-bug.py | 88bcd68511e3ab151bad7e95439f0d409610e661 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | def test_invalid_interfaces(self):
event = self.create_sample_event(platform='invalid-interfaces')
self.browser.get('/{}/{}/issues/{}/'.format(self.org.slug, self.project.slug, event.group.id))
self.browser.wait_until('.entries')
self.browser.snapshot('issue details invalid interfaces') | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
e084317ce402363aaaa6cec53d9f2e5eb9ef4784 | 6c33729791ce32fc034c93e3108aa36b04d0d356 | /menu/migrations/0005_abooutusmodel.py | b502c5f6ef4a1c5139331db27426b1da75eaf394 | [] | no_license | Kaimkilwa/routefinder_website | d4e30a8bcffcc2b36bf7599d60d8c1a31fda4e0a | 3b122e7f3ed86a6b300e0337771d817da24148db | refs/heads/master | 2020-09-16T09:04:39.672707 | 2019-11-24T09:48:17 | 2019-11-24T09:48:17 | 223,720,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # Generated by Django 2.2.6 on 2019-10-25 16:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menu', '0004_categories_description'),
]
operations = [
migrations.CreateModel(
name='AbooutUsModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(blank=True, null=True, upload_to='about/')),
('title', models.CharField(max_length=200, unique=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('description', models.TextField(blank=True, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
],
),
]
| [
"55236785+Kaimkilwa@users.noreply.github.com"
] | 55236785+Kaimkilwa@users.noreply.github.com |
a23ae7933fd92b9c8ac89dc6d1231bca721bf4e4 | 017f8fe77d628e5cae29d63b2298c163a4c82d13 | /pymongo/pymongo4.py | f9afbc3c621b3bfc65cc55a267d7fd3e407cfcd7 | [
"MIT"
] | permissive | TimothyDJones/learn-python | 571744b1c8be34b65868cef5859d6a634a792efa | 687239572bee5e5c94bebb6b175b3fba4d47600e | refs/heads/master | 2023-01-03T05:24:27.597861 | 2020-11-02T20:28:15 | 2020-11-02T20:28:15 | 297,409,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | import pymongo
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["mydatabase"]
print(myclient.list_database_names())
dblist = myclient.list_database_names()
if "mydatabase" in dblist:
print("'mydatabase' found in MongoDB!")
# Create a collection ("table")
mycol = mydb["customer"]
collist = mydb.list_collection_names()
print(collist)
if "customer" in collist:
print("'customer' collection found in 'mydatabase'!")
# Find the *first* record in collection
result = mycol.find_one()
print(result)
# Find *ALL* results in collection
for result in mycol.find():
print(result)
# Suppress display of the "_id" field
for result in mycol.find({}, { "_id": 0, "name": 1, "address": 1 }):
print(result)
# Suppress display of the "address" field
for result in mycol.find({}, { "address": 0 }):
print(result)
# Filter results using regex
myquery = { "address": { "$regex": "^8" } }
for result in mycol.find(myquery):
print(result)
# Sort by name in *descending* order (all results)
mydoc = mycol.find().sort("name", -1)
for result in mydoc:
print(result)
| [
"tdjones74021@yahoo.com"
] | tdjones74021@yahoo.com |
60c738119f59b161bc7c88e74ff2f6c9ae41ba36 | 504ec1cd9116713ee28e7a599554ce406812b529 | /TEXTBASEDGAME.py | e56d278f41affb281d19fc3370793e7ccb598f43 | [] | no_license | 2019osbjoh/adventure-game | ef0a38b648ee82f6ff79808b69c983a575dd79b4 | 88dc4e8da1dedf0e50c77a75fc34bc3cbda96553 | refs/heads/master | 2020-09-05T09:23:24.366961 | 2019-11-06T22:04:59 | 2019-11-06T22:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | import random
import keyboard
import sys, os # Used to check operating system and clear the shell
# Prints out an array line by line
def printMap(j, MAP):
for i in MAP:
print(MAP[j][0])
j = j + 1
def waitForKey():
keyboard.wait('esc')
# Clears Screen (should work on Windows and OSX)
def clearScreen(operatingSystem):
if operatingSystem == 'darwin':
os.system('clear')
elif operatingSystem == 'win32':
os.system('cls')
# Will save the game
def saveGame():
return 0
OS = sys.platform
playing = False
num = 0
mapOne = [["############"], ["# #"], ["# #"], ["# @ #"], ["# #"], ["# #"], ["# #"], ["############"]]
choice = ""
# what we plan to do:
# player = @ symbol
# walls are #
# you can't hit the walls
# secret rooms (sorcerers cave sytle) you can't see the rooms until you move into them
# ? = key - you won't see the door until you pick up the key
# the keys are colour coded for certain doors and the key might ot correspond to the door of the room you are in
print("\nText based adventure")
print("\nWelcome to this adventure!\n\t1)Start a new game")
choice = int(input("\nEnter a number: "))
if choice == 1:
playing = True
else:
print("ERROR - Invalid input")
while playing == True:
clearScreen(OS)
print("\nText based adventure\n")
printMap(num, mapOne)
waitForKey()
playing = False
| [
"john.osborne151@gmail.com"
] | john.osborne151@gmail.com |
f19df9ac7447b0432efdfecf90c9cd14f076163d | b5502ea9580e882a1c530f6e5914ce090e2d00a6 | /experiment/CASSANDRA/cassandra/pylib/cqlshlib/cqlhandling.py | a0917b3b4600c267134b92c852710b626c279689 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | Lingzhi-Ouyang/Almost-Strong-Consistency-Cassandra | a3fb54a8c35805a6810532719d7a8da99998f997 | 2b9aa8e78e134fc8811ea6caff3daca1f4a92936 | refs/heads/master | 2021-07-09T01:13:04.424482 | 2020-11-03T03:12:13 | 2020-11-03T03:12:13 | 205,116,322 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,310 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# code for dealing with CQL's syntax, rules, interpretation
# i.e., stuff that's not necessarily cqlsh-specific
import traceback
from cassandra.metadata import cql_keywords_reserved
from . import pylexotron, util
Hint = pylexotron.Hint
class CqlParsingRuleSet(pylexotron.ParsingRuleSet):
available_compression_classes = (
'DeflateCompressor',
'SnappyCompressor',
'LZ4Compressor',
)
available_compaction_classes = (
'LeveledCompactionStrategy',
'SizeTieredCompactionStrategy',
'DateTieredCompactionStrategy'
)
replication_strategies = (
'SimpleStrategy',
'OldNetworkTopologyStrategy',
'NetworkTopologyStrategy'
)
replication_factor_strategies = (
'SimpleStrategy',
'org.apache.cassandra.locator.SimpleStrategy',
'OldNetworkTopologyStrategy',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy'
)
def __init__(self, *args, **kwargs):
pylexotron.ParsingRuleSet.__init__(self, *args, **kwargs)
# note: commands_end_with_newline may be extended by callers.
self.commands_end_with_newline = set()
self.set_reserved_keywords(cql_keywords_reserved)
def set_reserved_keywords(self, keywords):
"""
We cannot let resreved cql keywords be simple 'identifier' since this caused
problems with completion, see CASSANDRA-10415
"""
syntax = '<reserved_identifier> ::= /(' + '|'.join(r'\b{}\b'.format(k) for k in keywords) + ')/ ;'
self.append_rules(syntax)
def completer_for(self, rulename, symname):
def registrator(f):
def completerwrapper(ctxt):
cass = ctxt.get_binding('cassandra_conn', None)
if cass is None:
return ()
return f(ctxt, cass)
completerwrapper.func_name = 'completerwrapper_on_' + f.func_name
self.register_completer(completerwrapper, rulename, symname)
return completerwrapper
return registrator
def explain_completion(self, rulename, symname, explanation=None):
if explanation is None:
explanation = '<%s>' % (symname,)
@self.completer_for(rulename, symname)
def explainer(ctxt, cass):
return [Hint(explanation)]
return explainer
def cql_massage_tokens(self, toklist):
curstmt = []
output = []
term_on_nl = False
for t in toklist:
if t[0] == 'endline':
if term_on_nl:
t = ('endtoken',) + t[1:]
else:
# don't put any 'endline' tokens in output
continue
# Convert all unicode tokens to ascii, where possible. This
# helps avoid problems with performing unicode-incompatible
# operations on tokens (like .lower()). See CASSANDRA-9083
# for one example of this.
str_token = t[1]
if isinstance(str_token, unicode):
try:
str_token = str_token.encode('ascii')
t = (t[0], str_token) + t[2:]
except UnicodeEncodeError:
pass
curstmt.append(t)
if t[0] == 'endtoken':
term_on_nl = False
output.extend(curstmt)
curstmt = []
else:
if len(curstmt) == 1:
# first token in statement; command word
cmd = t[1].lower()
term_on_nl = bool(cmd in self.commands_end_with_newline)
output.extend(curstmt)
return output
    def cql_parse(self, text, startsymbol='Start'):
        # Lex, normalize, then parse; the raw source text is kept in *SRC*.
        tokens = self.lex(text)
        tokens = self.cql_massage_tokens(tokens)
        return self.parse(startsymbol, tokens, init_bindings={'*SRC*': text})
    def cql_whole_parse_tokens(self, toklist, srcstr=None, startsymbol='Start'):
        # Require the full token list to match the grammar from startsymbol.
        return self.whole_match(startsymbol, toklist, srcstr=srcstr)
    def cql_split_statements(self, text):
        """Split *text* into per-statement token lists.

        Returns (statements, in_incomplete): statements is a list of token
        lists; in_incomplete is True when the text ends inside an unfinished
        BATCH or inside an unclosed pg-style string.
        """
        tokens = self.lex(text)
        tokens = self.cql_massage_tokens(tokens)
        stmts = util.split_list(tokens, lambda t: t[0] == 'endtoken')
        output = []
        in_batch = False
        in_pg_string = len([st for st in tokens if len(st) > 0 and st[0] == 'unclosedPgString']) == 1
        for stmt in stmts:
            if in_batch:
                # BEGIN BATCH ... APPLY BATCH is folded into one statement.
                output[-1].extend(stmt)
            else:
                output.append(stmt)
            if len(stmt) > 2:
                # stmt[-3] is the token before 'BATCH;' — 'APPLY' closes a batch.
                if stmt[-3][1].upper() == 'APPLY':
                    in_batch = False
                elif stmt[0][1].upper() == 'BEGIN':
                    in_batch = True
        return output, in_batch or in_pg_string
    def cql_complete_single(self, text, partial, init_bindings={}, ignore_case=True,
                            startsymbol='Start'):
        """Return (candidates, hints) completing *partial* at the end of *text*.

        NOTE: relies on Python 2 semantics — filter() must return a list here,
        since the result is indexed and mutated below.
        """
        # Complete against the last (possibly unterminated) statement only.
        tokens = (self.cql_split_statements(text)[0] or [[]])[-1]
        # init_bindings is never mutated: we copy before adding bindings, so
        # the mutable default argument is safe.
        bindings = init_bindings.copy()
        # handle some different completion scenarios- in particular, completing
        # inside a string literal
        prefix = None
        dequoter = util.identity
        lasttype = None
        if tokens:
            lasttype = tokens[-1][0]
            if lasttype == 'unclosedString':
                prefix = self.token_dequote(tokens[-1])
                tokens = tokens[:-1]
                partial = prefix + partial
                dequoter = self.dequote_value
                requoter = self.escape_value
            elif lasttype == 'unclosedName':
                prefix = self.token_dequote(tokens[-1])
                tokens = tokens[:-1]
                partial = prefix + partial
                dequoter = self.dequote_name
                requoter = self.escape_name
            elif lasttype == 'unclosedComment':
                return []
        bindings['partial'] = partial
        bindings['*LASTTYPE*'] = lasttype
        bindings['*SRC*'] = text
        # find completions for the position
        completions = self.complete(startsymbol, tokens, bindings)
        hints, strcompletes = util.list_bifilter(pylexotron.is_hint, completions)
        # it's possible to get a newline token from completion; of course, we
        # don't want to actually have that be a candidate, we just want to hint
        if '\n' in strcompletes:
            strcompletes.remove('\n')
            if partial == '':
                hints.append(Hint('<enter>'))
        # find matches with the partial word under completion
        if ignore_case:
            partial = partial.lower()
            f = lambda s: s and dequoter(s).lower().startswith(partial)
        else:
            f = lambda s: s and dequoter(s).startswith(partial)
        candidates = filter(f, strcompletes)
        if prefix is not None:
            # dequote, re-escape, strip quotes: gets us the right quoted text
            # for completion. the opening quote is already there on the command
            # line and not part of the word under completion, and readline
            # fills in the closing quote for us.
            candidates = [requoter(dequoter(c))[len(prefix) + 1:-1] for c in candidates]
            # the above process can result in an empty string; this doesn't help for
            # completions
            candidates = filter(None, candidates)
        # prefix a space when desirable for pleasant cql formatting
        if tokens:
            newcandidates = []
            for c in candidates:
                if self.want_space_between(tokens[-1], c) \
                        and prefix is None \
                        and not text[-1].isspace() \
                        and not c[0].isspace():
                    c = ' ' + c
                newcandidates.append(c)
            candidates = newcandidates
        # append a space for single, complete identifiers
        if len(candidates) == 1 and candidates[0][-1].isalnum() \
                and lasttype != 'unclosedString' \
                and lasttype != 'unclosedName':
            candidates[0] += ' '
        return candidates, hints
@staticmethod
def want_space_between(tok, following):
if following in (',', ')', ':'):
return False
if tok[0] == 'op' and tok[1] in (',', ')', '='):
return True
if tok[0] == 'stringLiteral' and following[0] != ';':
return True
if tok[0] == 'star' and following[0] != ')':
return True
if tok[0] == 'endtoken':
return True
if tok[1][-1].isalnum() and following[0] != ',':
return True
return False
    def cql_complete(self, text, partial, cassandra_conn=None, ignore_case=True, debug=False,
                     startsymbol='Start'):
        """Top-level completion entry point: returns hint strings followed by
        candidate completions. When exactly one candidate is found, it is
        greedily expanded via cql_complete_multiple."""
        init_bindings = {'cassandra_conn': cassandra_conn}
        if debug:
            init_bindings['*DEBUG*'] = True
            print "cql_complete(%r, partial=%r)" % (text, partial)
        completions, hints = self.cql_complete_single(text, partial, init_bindings,
                                                      startsymbol=startsymbol)
        if hints:
            hints = [h.text for h in hints]
            # Trailing '' keeps readline from auto-inserting the single hint.
            hints.append('')
        if len(completions) == 1 and len(hints) == 0:
            c = completions[0]
            if debug:
                print "** Got one completion: %r. Checking for further matches...\n" % (c,)
            if not c.isspace():
                new_c = self.cql_complete_multiple(text, c, init_bindings, startsymbol=startsymbol)
                completions = [new_c]
            if debug:
                print "** New list of completions: %r" % (completions,)
        return hints + completions
    def cql_complete_multiple(self, text, first, init_bindings, startsymbol='Start'):
        """Recursively extend completion *first* for as long as there is a
        single unambiguous continuation; stops at hints, ambiguity, or error."""
        debug = init_bindings.get('*DEBUG*', False)
        try:
            completions, hints = self.cql_complete_single(text + first, '', init_bindings,
                                                          startsymbol=startsymbol)
        except Exception:
            # Best-effort: on any expansion failure just keep what we have.
            if debug:
                print "** completion expansion had a problem:"
                traceback.print_exc()
            return first
        if hints:
            if not first[-1].isspace():
                first += ' '
            if debug:
                print "** completion expansion found hints: %r" % (hints,)
            return first
        if len(completions) == 1 and completions[0] != '':
            if debug:
                print "** Got another completion: %r." % (completions[0],)
            # Drop the padding space before separators like ',', ')' and ':'.
            if completions[0][0] in (',', ')', ':') and first[-1] == ' ':
                first = first[:-1]
            first += completions[0]
        else:
            common_prefix = util.find_common_prefix(completions)
            if common_prefix == '':
                return first
            if common_prefix[0] in (',', ')', ':') and first[-1] == ' ':
                first = first[:-1]
            if debug:
                print "** Got a partial completion: %r." % (common_prefix,)
            return first + common_prefix
        if debug:
            print "** New total completion: %r. Checking for further matches...\n" % (first,)
        return self.cql_complete_multiple(text, first, init_bindings, startsymbol=startsymbol)
@staticmethod
def cql_extract_orig(toklist, srcstr):
# low end of span for first token, to high end of span for last token
return srcstr[toklist[0][2][0]:toklist[-1][2][1]]
@staticmethod
def token_dequote(tok):
if tok[0] == 'unclosedName':
# strip one quote
return tok[1][1:].replace('""', '"')
if tok[0] == 'quotedStringLiteral':
# strip quotes
return tok[1][1:-1].replace("''", "'")
if tok[0] == 'unclosedString':
# strip one quote
return tok[1][1:].replace("''", "'")
if tok[0] == 'unclosedComment':
return ''
return tok[1]
@staticmethod
def token_is_word(tok):
return tok[0] == 'identifier'
| [
"lingzhi.ouyang@outlook.com"
] | lingzhi.ouyang@outlook.com |
6e242cc43e2c7d24c5cfd1f02e749621f9366a0e | 0bfb4208bdf7fcfd75311c777e25a3b639bf566d | /backend/code/iep/auth/models.py | fb6f856736cbe2fd1a25f1dc89baf52a17eff536 | [
"Apache-2.0"
] | permissive | socek/iep | ab7833f94af739abd19f569f28de84cdcc689e95 | 793e35ca5304eef7b7dacb5dd8d486622f497759 | refs/heads/master | 2020-05-16T13:48:12.252161 | 2019-12-03T08:28:05 | 2019-12-03T08:28:05 | 183,082,207 | 0 | 0 | Apache-2.0 | 2019-12-03T08:28:07 | 2019-04-23T19:24:49 | Python | UTF-8 | Python | false | false | 1,193 | py | from bcrypt import checkpw
from bcrypt import gensalt
from bcrypt import hashpw
from iep.application.model import Model
class User(Model):
def __init__(
self,
uid,
created_at=None,
updated_at=None,
name=None,
email=None,
is_admin=None,
password=None,
):
super().__init__(uid, created_at, updated_at)
self.name = name
self.email = email
self.is_admin = is_admin
self.password = password
def do_password_match(self, password):
"""
Validate if provided password match with the password from the model.
"""
if self.password:
return checkpw(password.encode("utf8"), self.password)
else:
return False
def set_password(self, password):
self.password = hashpw(password.encode("utf8"), gensalt())
def to_dict(self):
return {
'uid': self.uid,
'created_at': self.created_at,
'updated_at': self.updated_at,
'name': self.name,
'email': self.email,
'is_admin': self.is_admin,
'password': self.password,
}
| [
"msocek@gmail.com"
] | msocek@gmail.com |
8a463bda0d0c60cd4f34f3e9d156d3254165acfc | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/Flask-Large-Application-Example-master/tests/views/test_pypi_packages.py | 27394594cc76c8ccde073c14c83e1f2757b0f036 | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | from flask import current_app
from flask.ext.celery import CELERY_LOCK
import pytest
from redis.exceptions import LockError
from pypi_portal.extensions import db, redis
from pypi_portal.models.pypi import Package
from pypi_portal.models.redis import POLL_SIMPLE_THROTTLE
from pypi_portal.tasks import pypi
class FakeDelay(object):
    """Stand-in for a Celery AsyncResult whose task never finishes."""

    @staticmethod
    def ready():
        # Always report "not done" so the caller exercises its timeout path.
        return False
def test_index():
    # Smoke test: the pypi index view renders for an anonymous client.
    assert '200 OK' == current_app.test_client().get('/pypi/').status
def test_sync_empty(alter_xmlrpc):
    """Syncing against an empty PyPI XML-RPC result leaves the table empty."""
    alter_xmlrpc(set())
    # Clear the rate-limit marker so the sync task actually runs.
    redis.delete(POLL_SIMPLE_THROTTLE)
    Package.query.delete()
    db.session.commit()
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    assert [] == db.session.query(Package.name, Package.summary, Package.latest_version).all()
def test_sync_few(alter_xmlrpc):
    """A single upstream package is synced into the database."""
    alter_xmlrpc([dict(name='packageB', summary='Test package.', version='3.0.0'), ])
    redis.delete(POLL_SIMPLE_THROTTLE)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    expected = [('packageB', 'Test package.', '3.0.0'), ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert expected == actual
def test_sync_rate_limit(alter_xmlrpc):
    # NOTE(review): order-dependent — relies on test_sync_few having run first
    # (packageB row present and a fresh throttle key). The throttle is NOT
    # cleared here, so the sync is skipped and packageC never appears.
    alter_xmlrpc([dict(name='packageC', summary='Test package.', version='3.0.0'), ])
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    expected = [('packageB', 'Test package.', '3.0.0'), ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert expected == actual
def test_sync_parallel(alter_xmlrpc):
    """Holding the Celery task lock simulates a concurrently running sync:
    the new packageD must not be imported while the lock is taken."""
    alter_xmlrpc([dict(name='packageD', summary='Test package.', version='3.0.0'), ])
    redis.delete(POLL_SIMPLE_THROTTLE)
    redis_key = CELERY_LOCK.format(task_name='pypi_portal.tasks.pypi.update_package_list')
    lock = redis.lock(redis_key, timeout=1)
    assert lock.acquire(blocking=False)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    expected = [('packageB', 'Test package.', '3.0.0'), ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert expected == actual
    try:
        lock.release()
    except LockError:
        # The 1s lock may already have expired; releasing is best-effort.
        pass
def test_sync_many(alter_xmlrpc):
    """Several upstream packages are merged with the pre-existing packageB row."""
    alter_xmlrpc([
        dict(name='packageB1', summary='Test package.', version='3.0.0'),
        dict(name='packageB2', summary='Test package.', version='3.0.0'),
        dict(name='packageB3', summary='Test package.', version='3.0.0'),
        dict(name='packageB4', summary='Test package.', version='3.0.0'),
        dict(name='packageB5', summary='Test package.', version='3.0.0'),
    ])
    redis.delete(POLL_SIMPLE_THROTTLE)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    expected = [
        ('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
        ('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
        ('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
    ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert sorted(expected) == sorted(actual)
def test_sync_unhandled_exception():
    """A bogus THROTTLE value must surface as an unhandled ValueError."""
    old_throttle = pypi.THROTTLE
    pypi.THROTTLE = 'nan'
    redis.delete(POLL_SIMPLE_THROTTLE)
    with pytest.raises(ValueError):
        # NOTE(review): '.status()' calls a str — presumably never reached
        # because the ValueError fires inside get(); confirm intent.
        current_app.test_client().get('/pypi/sync').status()
    pypi.THROTTLE = old_throttle
def test_sync_timeout():
    """When the task never reports ready (FakeDelay), the view still redirects
    and the previously synced rows are unchanged."""
    old_delay = pypi.update_package_list.delay
    pypi.update_package_list.delay = FakeDelay
    redis.delete(POLL_SIMPLE_THROTTLE)
    assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
    expected = [
        ('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
        ('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
        ('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
    ]
    actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
    assert sorted(expected) == sorted(actual)
    pypi.update_package_list.delay = old_delay
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
bb4d3c4ffba8b3fdd9dae18528199a1e9560a1a0 | 43ede7b8fb546c00804c0ef94501f6e48ba170d6 | /Cursos Python/Python 3 - Solyd/Orientacao_a_objeto.py | e902f0d109aa9feef7f8a68a9651bc74a65cd1bb | [] | no_license | bopopescu/Python-13 | db407d17252473e78e705e563cfee4dbd316c6b9 | c8bef500f2d3e4a63d850f96dfa219eff2ecebda | refs/heads/master | 2022-11-22T16:24:08.490879 | 2020-06-11T14:22:24 | 2020-06-11T14:22:24 | 281,830,055 | 0 | 0 | null | 2020-07-23T02:26:31 | 2020-07-23T02:26:30 | null | UTF-8 | Python | false | false | 1,339 | py | class Cliente:
    def __init__(self, nome, cpf, idade):
        # Name-mangled private attributes (_Cliente__*); read back via dados_cliente().
        self.__nome = nome
        self.__cpf = cpf
        self.__idade = idade
    def dados_cliente(self):
        """Return the customer's data as a dict with keys nome/cpf/idade."""
        return {'nome': self.__nome,
                'cpf': self.__cpf,
                'idade': self.__idade}
class Conta(Cliente):
    """Bank account owned by a Cliente (account holder + balance/limit)."""
    def __init__(self, nome, cpf, idade, saldo, limite):
        super().__init__(nome, cpf, idade)
        # Account holder data, duplicated from Cliente: name mangling means
        # these are _Conta__* copies, separate from Cliente's _Cliente__* ones.
        self.__nome = nome
        self.__cpf = cpf
        self.__idade = idade
        # Account data: current balance and credit limit.
        self.__saldo = float(saldo)
        self.__limite = float(limite)
    def saldo_atual(self):
        # Print the current balance formatted as currency.
        print(f'Saldo atual: R${self.__saldo:.2f}')
    def dono(self):
        # Print the account holder's details.
        print('nome: ', self.__nome)
        print('cpf:', self.__cpf)
        print('idade :', self.__idade)
    def sacar(self, valor_saque):
        # NOTE(review): no check against saldo/limite — overdrafts of any size
        # succeed silently; confirm whether this is intended.
        self.__saldo -= float(valor_saque)
        print(f'Saque de R${valor_saque}, Realizado com sucesso!')
    def depositar(self, valor_deposito):
        self.__saldo += float(valor_deposito)
# Demo: create a customer, then open an account seeded from the customer data.
cliente = Cliente('Erickson', '19542634-05', 18)
dc = cliente.dados_cliente()
conta = Conta(dc['nome'], dc['cpf'], dc['idade'], 1500.00, 5000.00)
conta.saldo_atual()
conta.sacar(257.05)
conta.saldo_atual()
# NOTE(review): saldo_atual is printed twice in a row here — possibly unintended.
conta.saldo_atual()
conta.depositar(750.00)
conta.saldo_atual()
| [
"ofc.erickson@gmail.com"
] | ofc.erickson@gmail.com |
796965104f9a8b405aea58339305c0e917d2c247 | 7aae3051a7d08a280f7adc55b4b984bc48c87db3 | /vehicle/admins/vehicle_model_admin.py | ba26d4ec5f9adf2698da8711bc9fa8bd44e5b5a4 | [] | no_license | ohahlev/ahlev-django-vehicle | d087375e3b49cda9253a776f79e4531bbf0a686d | 51895c200b40be7a298a4054ba2d8945df6a84d0 | refs/heads/master | 2020-11-30T07:00:12.441028 | 2020-01-21T01:25:48 | 2020-01-21T01:25:48 | 230,340,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | from django.utils.html import format_html
from django.contrib import admin
from imagekit import ImageSpec
from imagekit.admin import AdminThumbnail
from imagekit.processors import ResizeToFill
from imagekit.cachefiles import ImageCacheFile
from ..models.vehicle_model import VehicleModel
from .widgets import AdminSmallestThumbnailSpec, AdminSmallThumbnailSpec
class VehicleModelAdmin(admin.ModelAdmin):
    """Django admin configuration for VehicleModel with a logo thumbnail preview."""
    def preview_thumbnail(self, obj):
        # Returns None (blank cell) when the model has no logo thumbnail.
        if obj.logo_thumbnail:
            return format_html(u"<img src='{}'/>", obj.logo_thumbnail.url)
    preview_thumbnail.short_description = 'Preview'
    readonly_fields = ['preview_thumbnail']
    fieldsets = [
        ("NAME", {
            'fields': ['name', 'logo', 'preview_thumbnail'],
        }),
    ]
    search_fields = ['name']
    list_display = ['name', 'preview_thumbnail', 'date_created', 'last_updated']
    class Media:
        css = {
            'all': (
                'vehicle/css/vehicle.css',
            )
        }
    # NOTE(review): the triple-quoted block below is an inert class-level string
    # (effectively commented-out JS asset config); consider deleting it.
    '''
    js = (
        'js/jquery.min.js',
        'js/popper.min.js',
        'js/bootstrap.min.js',
        'js/mdb.min.js',
        'js/myscript.js'
    )
    '''
admin.site.register(VehicleModel, VehicleModelAdmin) | [
"ohahlev@gmail.com"
] | ohahlev@gmail.com |
3140c1a44243603fd2dff8d0323632226ab72742 | 6d1c82eee36aa2e6efc8d702e70b176982e6124b | /maple/adapters/http.py | 58bb0276be35f468bfe784b327cb9416cc68d5a0 | [
"MIT"
] | permissive | akand074/maplepy | bd42e194df531831ab297c230db74b9706a210db | 7c99570fb107b099cfe917b1fec0a9c4d975a2db | refs/heads/master | 2021-05-07T18:15:38.213665 | 2018-02-11T21:25:02 | 2018-02-11T21:25:02 | 108,761,905 | 2 | 0 | MIT | 2018-02-11T21:25:03 | 2017-10-29T18:55:34 | Python | UTF-8 | Python | false | false | 346 | py |
class HttpAdapter(object):
    """Abstract GraphQL-over-HTTP adapter: subclasses must override every method.

    NOTE: the ``params={}`` / ``variables={}`` defaults are kept for interface
    compatibility; they are never mutated here.
    """

    def schema(self, schema):
        """Register or describe a schema. Abstract."""
        raise NotImplementedError()

    def query(self, query_string, params={}):
        """Run a read-only query. Abstract."""
        raise NotImplementedError()

    def mutation(self, mutate_string, params={}):
        """Run a mutation. Abstract."""
        raise NotImplementedError()

    def execute(self, query, variables={}):
        """Execute an arbitrary operation. Abstract."""
        raise NotImplementedError()
| [
"noreply@github.com"
] | noreply@github.com |
def check_params(self):
    """Validate all module input params; aborts via module.fail_json on error."""
    if not self.key_id.isdigit():
        self.module.fail_json(msg='Error: key_id is not digit.')
    if (int(self.key_id) < 1) or (int(self.key_id) > 4294967295):
        self.module.fail_json(msg='Error: The length of key_id is between 1 and 4294967295.')
    if self.state == 'present':
        # Guard first: len(None) would raise TypeError when state is 'present'
        # but no password was supplied; fail cleanly instead.
        if not self.password:
            self.module.fail_json(msg='Error: password is required when state is present.')
        elif (self.auth_type == 'encrypt') and ((len(self.password) < 20) or (len(self.password) > 392)):
            self.module.fail_json(msg='Error: The length of encrypted password is between 20 and 392.')
        elif (self.auth_type == 'text') and ((len(self.password) < 1) or (len(self.password) > 255)):
            self.module.fail_json(msg='Error: The length of text password is between 1 and 255.')
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
62a702a14533fbce06d8c0f429a40f79d961bd69 | 86e46e40112015487bf4eef9990e68574c2f8cb1 | /DLSecurity/test_SR.py | c6ae20ea6d9a5e987b305622c3993c0ed1d56c63 | [] | no_license | AkshayGadre/M2019 | a938318a57968db814677cb957e116b188077c23 | 7aabc44562629d1c12ad43eab25e58a0994d1763 | refs/heads/master | 2020-05-30T14:07:50.881587 | 2019-06-01T22:52:00 | 2019-06-01T22:52:00 | 189,782,292 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | data = []
labels = []
data_dim = 2048
mem_units = 21
# Samples per sequence: half the feature dimension times the LSTM memory units.
sampleL = (data_dim/2)*mem_units
# NOTE(review): get_data and model_from_json are undefined in this fragment —
# presumably imported elsewhere (keras.models.model_from_json); verify imports.
num_classes = get_data(data, labels, mem_units, sampleL)
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
score = loaded_model.evaluate(data, labels, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
| [
"noreply@github.com"
] | noreply@github.com |
cc2f26de6d45f12fbb7681f41bb1b6e37790fcb8 | e68a19d1c4e0febcf2d5e9fb8663c38b5648b592 | /trace/fault_tolerance/trace_parser_mulit.py | f536612ded76a7db89868fa25ebd03b28363c430 | [] | no_license | nsdi2017-ddn/ddn | 60261bb3964708f865412b428793508e262b4806 | 46e7ae06097c79f87e96c4d220a7669a70b07f63 | refs/heads/master | 2021-05-03T19:16:55.022536 | 2016-12-01T16:18:31 | 2016-12-01T16:18:31 | 69,271,348 | 18 | 7 | null | null | null | null | UTF-8 | Python | false | false | 6,033 | py | #!/usr/bin/python
# parse the trace and test
#
# Author: Shijie Sun
# Email: septimus145@gmail.com
# Sept, 2016
import time
import threading
import urllib
import urllib2
import Queue
import sys
import random
from itertools import izip
TIMEOUT = 3 # timeout restriction for requests
UPDATE_DELAY = 2 # delay time from receiving decision to send update
# Shared module globals, (re)assigned in __main__ and mutated by worker threads.
URL = []
trace_start_time = 0
trace_finish_time = 0
update_queue = Queue.Queue() # message queue for request
request_num = [] # number of requests sent and succeeded [[send1, succeeded1], ... , [send2, succeeded2]]
load_dict_list = []
cost_list = []
def request_performer(*trace):
    """Send one trace record to a server and account its cost.

    trace layout (by position): [0] timestamp (s), [1] feature string,
    [2] decision->cost map, [3] decision->load-factor map, [4] sequence index.
    NOTE(review): counters below are mutated from many threads with no lock —
    '+=' on list slots is not atomic; confirm this is tolerable for the stats.
    """
    global update_queue
    global request_num
    global load_dict_list
    global cost_list
    curr_second = trace[0] - trace_start_time
    curr_minute = curr_second / 60
    request_num[curr_second][0] += 1
    values = {'payload' : trace[1] + '\t'.join(trace[2].keys()), 'method' : 'request'}
    decision = ''
    # Round-robin the request across the configured server URLs.
    url_idx = trace[4] % len(URL)
    try:
        con = urllib2.urlopen(URL[url_idx], urllib.urlencode(values), timeout=TIMEOUT)
        decision = con.read().strip()
    except Exception as inst:
        print(inst)
        request_num[curr_second][1] += 1
        # Fallback on failure: pick an arbitrary (third) local decision.
        decision = trace[2].keys()[2]
        print "IM in trouble ---" + str(trace[2][decision])
        fout1.write("%d,%s,%s\n"%(url_idx,"local",str(trace[2][decision])))
        cost_list[curr_second] += float(trace[2][decision])
        return
    # if decision is not in decision_list
    if not trace[2].has_key(decision):
        return
    request_num[curr_second][1] += 1
    # update the load dict
    if not load_dict_list[curr_minute].has_key(decision):
        load_dict_list[curr_minute][decision] = 1
    else:
        load_dict_list[curr_minute][decision] += 1
    cost_factor = 1
    #if sum(load_dict_list[curr_minute].values()) > 0:
    #    load = load_dict_list[curr_minute][decision] / float(load_dict_list[curr_minute]['total_sessions'])
    #    for key in sorted(trace[3][decision].keys(), reverse=True):
    #        if load > key:
    #            cost_factor = trace[3][decision][key]
    #            break
    cost = cost_factor * float(trace[2][decision])
    fout1.write("%d,%s,%s\n"%(url_idx,"online",str(trace[2][decision])))
    print "IM ok ---" + str(trace[2][decision])
    cost_list[curr_second] += cost
    update_str = trace[1] + decision + '\t' + str(cost)
    # Schedule the feedback update to fire after UPDATE_DELAY seconds.
    update_queue.put([time.time() + UPDATE_DELAY, update_str, url_idx])
def update_performer():
    """Daemon loop: pop scheduled updates and POST them once their send time
    (info[0]) has passed; info = [send_time, payload, url_index]."""
    global update_queue
    while True:
        # Busy-wait (50ms polls) for work; a blocking get would also do.
        while update_queue.empty():
            time.sleep(0.05)
        info = update_queue.get()
        while time.time() < info[0]:
            time.sleep(0.05)
        try:
            con = urllib2.urlopen(URL[info[2]], urllib.urlencode({'payload' : info[1], 'method' : 'update'}), timeout=TIMEOUT)
        except Exception as inst:
            # Best-effort: failed updates are logged and dropped.
            print(inst)
if __name__ == '__main__':
    # Assignments below rebind module globals because this runs at module scope.
    #global URL
    #global trace_start_time
    #global trace_finish_time
    #global update_queue
    #global request_num
    #global load_dict_list
    #global cost_list
    if len(sys.argv) < 3:
        print "Usage: ", sys.argv[0], "url trace_file"
        sys.exit(1)
    URL = sys.argv[1].split(",")
    trace_list = []
    # load the trace
    with open(sys.argv[2]) as fin:
        # seek to the beginning of the file and read all traces
        fin.seek(0)
        j = 0
        for trace in fin.readlines():
            # Each line: "<ms-timestamp>\t<features> DecisionMap <d,c pairs> LoadMap <...>"
            [feature, info] = trace.split('DecisionMap')
            trace_time = int(feature.split('\t',1)[0]) / 1000
            [decision_str, load_str] = info.strip().split('LoadMap')
            decision_map = dict(decision.split(',') for decision in decision_str.strip().split('\t'))
            load_map = dict([load.split(',')[0], load.split(',')[1].split(';')] for load in load_str.strip().split('\t'))
            for load in load_map:
                # Pair up alternating threshold/factor entries into a dict.
                load_map[load] = dict(zip(load_map[load][0::2], load_map[load][1::2]))
            trace_list.append([trace_time, feature, decision_map, load_map, j])
            j+=1
    # initialize
    trace_start_time = trace_list[0][0]
    trace_stop_time = trace_list[len(trace_list) - 1][0]
    request_num = [[0,0] for i in range(trace_stop_time - trace_start_time + 1)]
    load_dict_list = [{} for i in range((trace_stop_time - trace_start_time)/60 + 1)]
    cost_list = [0 for i in range(trace_stop_time - trace_start_time + 1)]
    for load_dict in load_dict_list:
        load_dict['total_sessions'] = 0
    for trace in trace_list:
        load_dict_list[(trace[0] - trace_start_time) / 60]['total_sessions'] += 1
    update_thread = threading.Thread(target=update_performer)
    update_thread.daemon = True
    update_thread.start()
    test_start_time = time.time()
    test_second = 0
    send_num = 0
    fout1 = open('separa_result','w')
    fout = open('result.txt','w')
    # start the test
    print "------------------------------ %3d sec" % test_second
    for trace in trace_list:
        # Replay in real time: wait until this trace's offset has elapsed.
        while (time.time() - test_start_time) < (trace[0] - trace_start_time):
            time.sleep(0.05)
        if int(time.time() - test_start_time) > test_second:
            test_second = int(time.time() - test_start_time)
            print "| send %d, average cost %d" % (send_num, cost_list[test_second-1]/request_num[test_second-1][1])
            send_num = 0
            fout.write(str(cost_list[test_second-1] / request_num[test_second-1][1]) + '\n')
            print "------------------------------ %3d sec" % test_second
        # args=(trace): trace is a list, so its items become request_performer's
        # positional args (collected back by the *trace signature).
        thread = threading.Thread(target=request_performer, args=(trace))
        thread.daemon = True
        thread.start()
        send_num += 1
    # wait all the requests and updates are finished
    time.sleep(TIMEOUT * 2)
    fout.close()
    fout1.close()
    print request_num
    print cost_list
    #with open('result.txt', 'w') as fout:
    #    for i in range(len(cost_list)):
    #        fout.write(str(cost_list[i] / request_num[i][1]) + '\n')
| [
"junchenjiang@Junchens-MacBook-Pro-2.local"
] | junchenjiang@Junchens-MacBook-Pro-2.local |
02d24060c8e2ae54ffa980ef62fec59464614b83 | da45d1ca420ee93a4c40d2be4e6a62c59925c72c | /python/test/robotics/robots/test_aizek_robot.py | 762a62c43066111c1b4efc3c47792ee03f79d31f | [
"MIT"
] | permissive | asydorchuk/robotics | dd2fe8cd7f593c842f467f67d4fc597202a3926b | 678fb7e879326380c5bd9795d81beb21efe4d30c | refs/heads/master | 2021-01-20T02:19:28.741221 | 2015-06-02T22:14:05 | 2015-06-02T22:14:05 | 12,576,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,809 | py | import math
import mock
import unittest
from robotics.robots.aizek_robot import AizekRobot
class TestAizekRobot(unittest.TestCase):
    """Odometry tests for AizekRobot.updatePosition.

    The two updatePosition arguments appear to be left/right wheel rotation
    angles in radians — TODO confirm against AizekRobot's implementation.
    With r=0.025 and wheel distance 0.1, one wheel turning 2*pi alone should
    arc the robot a quarter circle of radius 0.05.
    """
    ROBOT_WHEEL_RADIUS = 0.025
    ROBOT_WHEEL_DISTANCE = 0.1
    def setUp(self):
        # All hardware dependencies are mocked; only the kinematics are real.
        self.lmotor = mock.Mock()
        self.rmotor = mock.Mock()
        self.wencoder = mock.Mock()
        self.lsensor = mock.Mock()
        self.fsensor = mock.Mock()
        self.rsensor = mock.Mock()
        self.robot = AizekRobot(
            left_motor=self.lmotor,
            right_motor=self.rmotor,
            wheel_encoder=self.wencoder,
            left_distance_sensor=self.lsensor,
            front_distance_sensor=self.fsensor,
            right_distance_sensor=self.rsensor,
            wheel_radius=self.ROBOT_WHEEL_RADIUS,
            wheel_distance=self.ROBOT_WHEEL_DISTANCE,
        )
    def testUpdatePositionRotationMovement1(self):
        # Opposite equal wheel rotations: turn in place, no translation.
        self.robot.updatePosition(-0.5 * math.pi, 0.5 * math.pi)
        self.assertAlmostEqual(0.0, self.robot.pos_x)
        self.assertAlmostEqual(0.0, self.robot.pos_y)
        self.assertAlmostEqual(0.25 * math.pi, self.robot.phi)
    def testUpdatePositionRotationMovement2(self):
        self.robot.updatePosition(0.0, 0.5 * math.pi)
        self.assertAlmostEqual(0.125 * math.pi, self.robot.phi)
        self.robot.updatePosition(0.5 * math.pi, 0.0)
        self.assertAlmostEqual(0.0, self.robot.phi)
    def testUpdatePositionRotationMovement3(self):
        # Heading must wrap into (-pi, pi].
        self.robot.updatePosition(0.0, 5 * math.pi)
        self.assertAlmostEqual(-0.75 * math.pi, self.robot.phi)
    def testUpdatePositionRotationMovement4(self):
        self.robot.updatePosition(0.0, 99 * math.pi)
        self.assertAlmostEqual(0.75 * math.pi, self.robot.phi)
        self.robot.updatePosition(104 * math.pi, 0.0)
        self.assertAlmostEqual(0.75 * math.pi, self.robot.phi)
    def testUpdatePositionRotationMovement5(self):
        # Equal rotations: pure translation, heading unchanged.
        self.robot.updatePosition(23.75 * math.pi, 23.75 * math.pi)
        self.assertAlmostEqual(0.0, self.robot.phi)
    def testUpdatePositionLinearMovement1(self):
        self.robot.setPosition(0.0, 0.0, 0.25 * math.pi)
        self.robot.updatePosition(math.pi, math.pi)
        self.assertAlmostEqual(0.025 / math.sqrt(2.0) * math.pi, self.robot.pos_x)
        self.assertAlmostEqual(0.025 / math.sqrt(2.0) * math.pi, self.robot.pos_y)
        self.assertAlmostEqual(0.25 * math.pi, self.robot.phi)
        self.robot.updatePosition(2 * math.pi, -2 * math.pi)
        self.robot.updatePosition(math.pi, math.pi)
        self.assertAlmostEqual(0.0, self.robot.pos_x)
        self.assertAlmostEqual(0.0, self.robot.pos_y)
        self.assertAlmostEqual(-0.75 * math.pi, self.robot.phi)
    def testUpdatePositionCurvedMovement1(self):
        # Single-wheel drive: quarter-circle arc of radius wheel_distance/2.
        self.robot.updatePosition(0.0, 2 * math.pi)
        self.assertAlmostEqual(0.05, self.robot.pos_x)
        self.assertAlmostEqual(0.05, self.robot.pos_y)
        self.assertAlmostEqual(0.5 * math.pi, self.robot.phi)
    def testUpdatePositionCurvedMovement2(self):
        self.robot.updatePosition(0.0, -2 * math.pi)
        self.assertAlmostEqual(-0.05, self.robot.pos_x)
        self.assertAlmostEqual(0.05, self.robot.pos_y)
        self.assertAlmostEqual(-0.5 * math.pi, self.robot.phi)
    def testUpdatePositionCurvedMovement3(self):
        self.robot.updatePosition(2 * math.pi, 0.0)
        self.assertAlmostEqual(0.05, self.robot.pos_x)
        self.assertAlmostEqual(-0.05, self.robot.pos_y)
        self.assertAlmostEqual(-0.5 * math.pi, self.robot.phi)
    def testUpdatePositionCurvedMovement4(self):
        self.robot.updatePosition(-2 * math.pi, 0.0)
        self.assertAlmostEqual(-0.05, self.robot.pos_x)
        self.assertAlmostEqual(-0.05, self.robot.pos_y)
        self.assertAlmostEqual(0.5 * math.pi, self.robot.phi)
| [
"sydorchuk.andriy@gmail.com"
] | sydorchuk.andriy@gmail.com |
209a7ce101cdd36f27ca12065507ae893c043ec7 | 23f8e15047e2303bbc0ef6d7db93286f629f139a | /src/Legacy/ruscorpora_tagging/semantics.py | 674fbe46dee2c13c15db5c8ef260d64b38f7c5d0 | [] | no_license | russian-national-corpus/preprocessing | 988a0616f3082ae3eeb63f573635f4888c4e5eb1 | 4b6a5a9f822d0d7a1d16baaf72795f888add5f23 | refs/heads/master | 2021-06-13T00:14:44.104132 | 2019-10-03T20:38:34 | 2019-10-03T20:38:34 | 165,403,637 | 2 | 0 | null | 2021-03-26T13:45:33 | 2019-01-12T15:32:47 | Python | UTF-8 | Python | false | false | 11,295 | py | # -*- Encoding: utf-8 -*-
# All rights belong to Non-commercial Partnership "Russian National Corpus"
# http://ruscorpora.ru
import sys
import os
import xml.sax
import codecs
import re
import time
from modules import common
import global_trash
# Dictionary columns that carry no semantic features and are skipped on load.
ignored_columns = ["ex", "dc"]
# A valid feature token: lowercase ascii, digits, underscore, hyphen.
feature_token_re = re.compile("^[a-z0-9_\-]+$")
# Captures text around a single parenthesized group, used to strip it.
bracketed_re = re.compile("^(.*)\(.*\)(.*)$")
# Maps raw dictionary feature tags to merged (simplified) tag names.
merge_items = {
    u"r:concr" : u"concr",
    u"r:abstr" : u"abstr",
    u"t:stuff" : u"mat",
    u"pt:aggr" : u"coll",
    u"r:qual" : u"qual",
    u"r:pers" : u"pers",
    u"r:ref" : u"refl",
    u"r:rel" : u"rel",
    u"r:indet" : u"indef",
    u"r:neg" : u"neg",
    u"r:poss" : u"poss",
    u"r:dem" : u"dem",
    u"r:spec" : u"def"
}
# Per-POS whitelists of merged tags kept when merge mode is enabled.
merge_classes = {
    u"S" : set([u"concr", u"abstr", u"mat", u"coll"]),
    u"A" : set([u"qual", u"rel", u"poss"]),
    u"SPRO" : set([u"pers", u"refl"]),
    u"*PRO" : set([u"rel", u"indef", u"neg", u"poss", u"dem", u"def"])
}
# Global SemanticDictionary instance, assigned elsewhere before use.
dictionary = None
class SemanticEntry:
    """One dictionary entry: a (lemma, category) pair plus its feature sets."""

    def __init__(self, lemma, category):
        self.lemma = lemma
        self.category = category
        # Feature lists collected from primary vs. secondary dictionary rows.
        self.primary_features = []
        self.secondary_features = []
class SemanticDictionary:
    """Semantic dictionary loaded from a ';'-separated file.

    Maps "category:lemma" keys to SemanticEntry objects. Python 2 only:
    uses `print` statements, `raise ValueError, ...` and list-returning zip().
    """
    def __init__ (self, filename):
        self.data = {}
        # Per-feature usage stats: feature -> [count, up to 20 (cat, lemma) examples].
        self.stats = {}
        headers = []
        if filename != None and len(filename) > 0:
            print "semantics.py: processing file " + filename
            src = codecs.getreader("utf-8")(file(filename, "rb"))
            for line in src:
                tokens = line.strip().split(";")
                if not tokens: continue
                # Header row: "Cat;Lemma;..." names the feature columns.
                if tokens[0] == "Cat" and tokens[1] == "Lemma":
                    headers = [x.strip().lower() for x in tokens]
                    continue
                elif not headers:
                    raise ValueError, "No header before data line in file '" + filename + "'"
                if len(tokens) < 2:
                    print >>sys.stderr, "Trouble: bad line >", line
                    continue
                category = tokens[0].strip().lower().replace("-", "")
                lemma = tokens[1].strip().lower()
                key = category + ":" + lemma
                entry = self.data.get(key)
                if entry is None:
                    entry = SemanticEntry(lemma, category)
                    self.data[key] = entry
                # Column 3 flags a primary (vs. secondary) feature row.
                primary = (len(tokens) > 2 and tokens[2] == "1")
                features = []
                for h, t in zip(headers, tokens)[3:]:
                    if h in ignored_columns:
                        continue
                    # Strip all parenthesized fragments from the cell value.
                    while True:
                        match = bracketed_re.match(t)
                        if not match: break
                        t = match.group(1) + match.group(2)
                    for s in t.split('/'):
                        # Skip uncertain/placeholder values.
                        if '$' in s or '?' in s or '*' in s:
                            continue
                        parts = [h]
                        for p in s.strip().lower().replace('@', '').split(':'):
                            if not feature_token_re.match(p): break
                            parts.append(p)
                        if len(parts) < 2: continue
                        s = ":".join(parts)
                        s = s.replace("ev:ev", "ev")
                        features.append(s)
                        # Stats
                        rec = self.stats.setdefault(s, [0, []])
                        rec[0] += 1
                        if len(rec[1]) < 20:
                            rec[1].append((category, lemma))
                if primary:
                    entry.primary_features.append(features)
                else:
                    entry.secondary_features.append(features)
    def get(self, in_entry, default=None):
        # in_entry is a "category:lemma" key.
        return self.data.get(in_entry, default)
def _semantic_filter(features, category, grams):
    """Flatten *features* (a list of feature-string lists), filtering out
    feature sets inconsistent with the grammatical tags in *grams*.

    NOTE(review): `doMerge` is a module-level flag defined outside this
    fragment — verify it is set before this function is called.
    """
    result = []
    animated = (u'anim' in grams) # or u'од' in grams)
    qualitative = (u'brev' in grams or # u'кр' in grams or
                   u'comp' in grams or # u'срав' in grams or
                   u'supr' in grams) # or u'прев' in grams)
    for f in features:
        if not doMerge:
            if category == u'S':
                # Nouns: semantic animacy must agree with grammatical animacy.
                semantic_animated = (u't:hum' in f or
                                     u't:animal' in f or
                                     u't:persn' in f or
                                     u't:patrn' in f or
                                     u't:famn' in f)
                if animated != semantic_animated:
                    continue
            elif category == u'A':
                # Adjectives in short/comparative/superlative form must be qualitative.
                if qualitative and 'r:qual' not in f:
                    continue
        result.extend(f)
    return list(set(result))
class CorpusHandler(xml.sax.handler.ContentHandler):
    """SAX handler that copies a corpus XML stream to ``outfile``,
    enriching every <ana> element with semantic information taken from the
    module-level ``dictionary`` (and, in merge mode, folding merged
    semantic classes into the ``gr`` attribute).

    ``self.tag_pending`` implements delayed tag closing: the '>' of a start
    tag is not written until we know whether the element has content, which
    lets endElement emit a compact '/>' for empty elements.
    """
    def __init__(self, outfile):
        xml.sax.handler.ContentHandler.__init__(self)
        self.out = outfile
    def close_pending_tag(self):
        # Finish a previously opened start tag, if any.
        if self.tag_pending:
            self.out.write(">")
            self.tag_pending = False
    def startDocument(self):
        # Output is windows-1251 encoded; declare that in the XML prolog.
        self.out.write(u"<?xml version=\"1.0\" encoding=\"windows-1251\"?>\n")
        self.tag_pending = False
    def endDocument(self):
        self.close_pending_tag()
    def startElement(self, tag, attrs):
        self.close_pending_tag()
        self.out.write("<%s" % tag)
        # Copy all attributes except 'gr', which is rewritten below for <ana>.
        for (attname, attvalue) in attrs.items():
            if attname != u"gr":
                self.out.write(" %s=\"%s\"" % (attname, common.quoteattr(attvalue)))
        if tag == "ana":
            lemma = attrs.get(u"lex")
            features = attrs.get(u"gr")
            if lemma and features:
                # Split the grammar string on all separator characters;
                # the first token is the part-of-speech category.
                grams = features.replace(',', ' ').replace('=', ' ').replace('(', ' ').replace(')', ' ').replace('/', ' ').strip().split()
                category = grams[0].lower().replace("-", "")
                entry = dictionary.get(category + ":" + lemma.lower())
                if entry:
                    if not doMerge:
                        # Plain mode: emit filtered semantics as separate
                        # sem / sem2 attributes.
                        primary_semantics = _semantic_filter(entry.primary_features, category, grams)
                        if primary_semantics:
                            self.out.write(" sem=\"%s\"" % common.quoteattr(" ".join(
                                primary_semantics)))
                        secondary_semantics = _semantic_filter(entry.secondary_features, category, grams)
                        if secondary_semantics:
                            self.out.write(" sem2=\"%s\"" % common.quoteattr(" ".join(
                                secondary_semantics)))
                    else:
                        # Merge mode: map semantic tags to merged classes
                        # (merge_items / merge_classes are module globals)
                        # and append them to the grammatical tags instead.
                        features = _semantic_filter(entry.primary_features + entry.secondary_features, category, grams)
                        addition = [merge_items.get(x) for x in features]
                        if category.lower() == u"s":
                            addition = merge_classes[u"S"].intersection(addition)
                        elif category.lower() == u"a":
                            addition = merge_classes[u"A"].intersection(addition)
                            # Relational/possessive readings get adjective-
                            # specific class names.
                            if u"rel" in addition:
                                addition.discard(u"rel")
                                addition.add(u"reladj")
                            elif u"poss" in addition:
                                addition.discard(u"poss")
                                addition.add(u"possadj")
                        elif category.endswith(u"apro") or category.lower() == u"spro":
                            if category.lower() == u"spro":
                                addition = merge_classes[u"SPRO"].intersection(addition)
                            else:
                                addition = merge_classes[u"*PRO"].intersection(addition)
                            if lemma == u"где":
                                addition.discard(u"indef")
                        else:
                            # Other categories receive no merged classes.
                            addition = []
                        grams.extend(list(addition))
                # Re-emit the (possibly extended) grammar attribute.
                self.out.write(" gr=\"%s\"" % common.quoteattr(" ".join(grams)))
        self.tag_pending = True
    def endElement(self, tag):
        # '/>' if the element turned out to be empty, '</tag>' otherwise.
        if self.tag_pending:
            self.out.write("/>")
        else:
            self.out.write("</%s>" % tag)
        self.tag_pending = False
    def characters(self, content):
        if content:
            self.close_pending_tag()
            self.out.write(common.quotetext(content))
    def ignorableWhitespace(self, whitespace):
        # Preserve whitespace exactly like ordinary character data.
        self.characters(whitespace)
def convert_directory(indir, outdir, indent = ""):
    """Recursively mirror *indir* into *outdir*, converting every file with
    convert(); subdirectories are processed first, then plain files."""
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    print("%sEntering %s" % (indent, os.path.basename(indir)))
    started = time.time()
    deeper = indent + " "
    # Partition directory entries into subdirectories and plain files.
    directories = []
    plain_files = []
    for entry in os.listdir(indir):
        if os.path.isdir(os.path.join(indir, entry)):
            directories.append(entry)
        else:
            plain_files.append(entry)
    for name in directories:
        if name == ".svn":
            continue  # skip Subversion metadata
        convert_directory(os.path.join(indir, name), os.path.join(outdir, name), deeper)
    for name in plain_files:
        convert(os.path.join(indir, name), os.path.join(outdir, name), deeper)
    print("%sTime: %.2f s" % (indent, time.time() - started))
def convert(inpath, outpath, indent=""):
print "%s%s" % (indent, os.path.basename(inpath)),
out = codecs.getwriter("windows-1251")(file(outpath, "wb"), 'xmlcharrefreplace')
try:
xml.sax.parse(inpath, CorpusHandler(out))
print " - OK"
except xml.sax.SAXParseException:
print " - FAILED"
# Module-level mode flag: False -> emit filtered semantics as sem/sem2
# attributes; True -> merge semantic classes into the gr attribute.
# Intended to be set from the --merge option in main().
doMerge = False
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--input", dest="input", help="input path")
parser.add_option("--output", dest="output", help="output path")
parser.add_option("--semdict", dest="dict", help="semantic dictionary path")
parser.add_option("--merge", action="store_true", dest="merge", default=False, help="use full morphology")
parser.add_option("--mystem", dest="mystem", help="mystem binary path")
(options, args) = parser.parse_args()
global_trash.MYSTEM_PATH = options.mystem
doMerge = options.merge
inpath = os.path.abspath(options.input)
outpath = os.path.abspath(options.output)
print "Reading the semantic dictionary...",
global dictionary
dictionary = SemanticDictionary(options.dict)
print "done!"
test = False
if test:
writer = codecs.getwriter("windows-1251")(sys.stdout, 'xmlcharrefreplace')
atoms = dictionary.stats.keys()
atoms.sort()
for key in atoms:
(freq, samples) = dictionary.stats[key]
print >>writer, str(freq).rjust(6)+"\t"+key+"\t",
for item in samples:
print >>writer, ":".join(item),
if freq > 20:
print >>writer, "...",
print >>writer
sys.exit(1)
else:
dictionary = dictionary.data
if os.path.isdir(inpath):
convert_directory(inpath, outpath)
else:
convert(inpath, outpath)
if __name__ == "__main__":
main()
| [
"vyshkant@gmail.com"
] | vyshkant@gmail.com |
25b7e0b318af75980c433cbe77c201b9aa7f1f9e | dbffb070dca3931f2321b3ff4782f5e0ab012cd3 | /client.py | 616275b9cd989c2f4cb5e53bb9ffb25002659932 | [] | no_license | maryam-542/Project-Module-1 | fb63b0130301734c8ef6c81337e0c9eb602c1bd4 | 93ce823aa59b091c18c9f73d388f54502b4dde28 | refs/heads/master | 2016-09-13T23:16:51.804065 | 2016-05-26T17:30:54 | 2016-05-26T17:30:54 | 59,131,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from socket import *
from threading import Thread
import sys
# Connection endpoint for the chat server (assumed to run on this machine).
HOST = 'localhost'
PORT = 8888
ADDR = (HOST, PORT)
# One TCP socket shared by the main thread (sender) and the recv thread.
Sock = socket(AF_INET, SOCK_STREAM)
Sock.connect(ADDR)
def recv():
    # Receiver loop: print everything the server sends; terminate the whole
    # process once the server closes the connection (empty recv).
    while True:
        data = Sock.recv(1024)
        if not data: sys.exit(0)
        print data
# Run the receiver concurrently with the input loop below.
Thread(target=recv).start()
# Sender loop: forward each line the user types to the server.
# NOTE(review): this loop never exits, so the close() below is unreachable;
# presumably the process ends via Ctrl-C or recv()'s sys.exit -- confirm.
while True:
    data = raw_input('> ')
    Sock.send(data)
Sock.close()
| [
"bsef14m542@pucit.edu.pk"
] | bsef14m542@pucit.edu.pk |
6408769016157138b15691bb9158e5695f16d725 | 58bbaa2cd1af8dbd2f862708a0da7f4b3e7246b8 | /Yerma/pymongo/createcol.py | 530039b05bae261d060c4323f7544b3b916aee17 | [] | no_license | Yermito/PPEND | 7a087243fa578fa0b50541da160a11d2b73dcbc3 | 7c59fec6a3aaaab5c022d46fcc97b201f4bbf84c | refs/heads/master | 2022-05-28T17:36:16.310690 | 2020-05-01T17:45:33 | 2020-05-01T17:45:33 | 260,516,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | import pymongo
# Connect to a local MongoDB instance on the default port.
myclient = pymongo.MongoClient('localhost:27017')
mydatabase = myclient["university"]
# Referencing a collection does not create it until data is inserted.
mycollection = mydatabase["fit"]
# Show which collections currently exist in the database.
print(mydatabase.list_collection_names())
| [
"noreply@github.com"
] | noreply@github.com |
19ef56453f855c29a72eaa6c8c52e2ca967e6a36 | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Binary Search/ValidPerfectSquare.py | c4e8a70a8beb4c70db11315cbe222321332ff181 | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 898 | py | """
LeetCode Problem 367. Valid Perfect Square
Link: https://leetcode.com/problems/valid-perfect-square/
Written by: Mostofa Adib Shakib
Language: Python
Observation:
1) Numbers less than 2 (i.e. 0 and 1) are always perfect squares, so return True.
2) The square root of num always lies in the first half of the range [2, num]. Hence, we can discard the second half.
Time Complexity: O(log n)
Space Complexity: O(1)
"""
class Solution:
    def isPerfectSquare(self, num: int) -> bool:
        """Return True iff *num* is a perfect square.

        Binary-searches the candidate root in [2, num // 2], so the
        running time is O(log num) with O(1) extra space.
        """
        # 0 and 1 are perfect squares; handles the trivial range as well.
        if num <= 1:
            return True
        lo, hi = 2, num // 2
        while lo <= hi:
            candidate = lo + (hi - lo) // 2
            square = candidate * candidate
            if square == num:
                return True
            if square < num:
                lo = candidate + 1
            else:
                hi = candidate - 1
        return False
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
a921fe8b1f0c63d2290abf91aefc289205f29ead | 6024cf204c8e14553dca0e37f699aa08d963404a | /checkout/models.py | 754be2799fec355e3103da68d5a800213d40f0bc | [] | no_license | Code-Institute-Submissions/Nourish-and-Lift | 5f2c141e604f8d87fcb07bd7f61766456535bab3 | 8998b75b18c0f697cc6caff7d586bf5fb06ce519 | refs/heads/main | 2023-07-16T04:49:39.718323 | 2021-08-29T08:33:53 | 2021-08-29T08:33:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,623 | py | import uuid
from django.db import models
from django.db.models import Sum
from django.conf import settings
from django_countries.fields import CountryField
from products.models import Product
from profiles.models import UserProfile
class Order(models.Model):
    """A customer order: contact/delivery details plus cached totals.

    Totals are denormalised onto the row and refreshed by update_total();
    the order number is generated automatically on first save.
    """
    # Random UUID-derived identifier; assigned once in save(), not editable.
    order_number = models.CharField(max_length=32, null=False, editable=False)
    # Optional link to the buyer's profile; the order survives profile
    # deletion (SET_NULL).
    user_profile = models.ForeignKey(
        UserProfile, on_delete=models.SET_NULL,
        null=True, blank=True, related_name='orders')
    full_name = models.CharField(max_length=50, null=False, blank=False)
    email = models.EmailField(max_length=254, null=False, blank=False)
    phone_number = models.CharField(max_length=20, null=False, blank=False)
    country = CountryField(blank_label='Country *', null=False, blank=False)
    # Postcode is the only optional address component.
    postcode = models.CharField(max_length=20, blank=True)
    town_or_city = models.CharField(max_length=40, null=False, blank=False)
    street_address1 = models.CharField(max_length=80, null=False, blank=False)
    # NOTE(review): address line 2 is required here (blank=False); second
    # address lines are usually optional -- confirm this is intentional.
    street_address2 = models.CharField(max_length=80, null=False, blank=False)
    county = models.CharField(max_length=40, null=False, blank=False)
    date = models.DateTimeField(auto_now_add=True)
    # Monetary totals recalculated by update_total(); defaults of 0 let the
    # row exist before any line items are attached.
    delivery_cost = models.DecimalField(
        max_digits=6, decimal_places=2, null=False, default=0)
    order_total = models.DecimalField(
        max_digits=10, decimal_places=2, null=False, default=0)
    grand_total = models.DecimalField(
        max_digits=10, decimal_places=2, null=False, default=0)
    # Presumably the serialised shopping bag captured at checkout -- confirm
    # against the checkout view.
    original_bag = models.TextField(null=False, blank=False, default='')
    # Stripe payment identifier associated with this order.
    stripe_pid = models.CharField(
        max_length=254, null=False, blank=False, default='')

    def _generate_order_number(self):
        """
        Generate a random, unique order number via UUID
        """
        return uuid.uuid4().hex.upper()

    def update_total(self):
        """
        Update grand total each time a line item is added,
        accounting for delivery costs.
        """
        # `or 0` covers the no-items case where the aggregate returns None.
        self.order_total = self.lineitems.aggregate(
            Sum('lineitem_total'))['lineitem_total__sum'] or 0
        if self.order_total < settings.FREE_DELIVERY_THRESHOLD:
            self.delivery_cost = (
                self.order_total * settings.STANDARD_DELIVERY_PERCENTAGE / 100
            )
        else:
            # Orders at or above the threshold ship free.
            self.delivery_cost = 0
        self.grand_total = self.order_total + self.delivery_cost
        self.save()

    def save(self, *args, **kwargs):
        """
        Override the original save method to set the order number
        if it hasn't been set already.
        """
        if not self.order_number:
            self.order_number = self._generate_order_number()
        super().save(*args, **kwargs)

    def __str__(self):
        return self.order_number
class OrderLineItem(models.Model):
    """A single product line on an Order; its total is derived on save."""
    # Deleting the order removes its line items (CASCADE); the reverse
    # accessor `lineitems` is what Order.update_total() aggregates over.
    order = models.ForeignKey(
        Order, null=False, blank=False,
        on_delete=models.CASCADE, related_name='lineitems')
    product = models.ForeignKey(
        Product, null=False, blank=False, on_delete=models.CASCADE)
    quantity = models.IntegerField(null=False, blank=False, default=0)
    # Cached price * quantity; not editable because save() computes it.
    lineitem_total = models.DecimalField(
        max_digits=6, decimal_places=2,
        null=False, blank=False, editable=False)

    def save(self, *args, **kwargs):
        """
        Override the original save method to set the lineitem total
        and update the order total.
        """
        # Derive the line total from the product's current price.
        # NOTE(review): despite the docstring, this method does not itself
        # call order.update_total(); presumably a post_save signal handles
        # that -- confirm.
        self.lineitem_total = self.product.price * self.quantity
        super().save(*args, **kwargs)

    def __str__(self):
        return f'SKU {self.product.sku} on order {self.order.order_number}'
| [
"hdhillon478@gmail.com"
] | hdhillon478@gmail.com |
8606168f73fe5c873daa5e66d25d6624afb096ad | e5266a20d3e610cf3fcfb75610d309ab386e1282 | /AppFPBajo/migrations/0003_filtro_butterworth.py | 7a2bde7406f4877a57297925679f9b5803fc10fa | [] | no_license | slopezrap/FiltrosUPNA | 5105736b990aeff29f70661f756a10f43b253535 | 206d8a35f8c13b4a9255b51030d9c6478571cadd | refs/heads/master | 2020-03-16T00:52:15.875277 | 2018-07-05T16:49:01 | 2018-07-05T16:49:01 | 130,229,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | # Generated by Django 2.0.4 on 2018-05-31 10:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Filtro_Butterworth table."""

    dependencies = [
        ('AppFPBajo', '0002_auto_20180531_1039'),
    ]

    operations = [
        # One row stores a filter order plus ten float coefficient columns
        # (g_1 .. g10).
        migrations.CreateModel(
            name='Filtro_Butterworth',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ordenFiltro', models.IntegerField(verbose_name='Orden del Filtro')),
                ('g_1', models.FloatField()),
                ('g_2', models.FloatField()),
                ('g_3', models.FloatField()),
                ('g_4', models.FloatField()),
                ('g_5', models.FloatField()),
                ('g_6', models.FloatField()),
                ('g_7', models.FloatField()),
                ('g_8', models.FloatField()),
                ('g_9', models.FloatField()),
                ('g10', models.FloatField()),
            ],
        ),
    ]
| [
"Sergio.LopezRapado@EURS.EY.NET"
] | Sergio.LopezRapado@EURS.EY.NET |
baa4949892d605c13b56159e9f3b000b491bc3e4 | 7ba34d8a6eaed1ccc4efb043060a4b8415c47b1d | /netdev/vendors/alcatel/alcatel_aos.py | cb2db396a24d53fc11c48975244eebad4226c885 | [
"Apache-2.0"
] | permissive | ixjx/netdev | 8b9da831dd5e45855ee65ce0552c0039ecff285b | cd30593c233c92e301cdaca97798ec71668ab78d | refs/heads/master | 2023-07-04T08:04:58.243607 | 2021-07-28T09:32:11 | 2021-07-28T09:32:11 | 295,272,213 | 0 | 0 | Apache-2.0 | 2020-09-14T01:27:07 | 2020-09-14T01:27:06 | null | UTF-8 | Python | false | false | 1,242 | py | from netdev.vendors.base import BaseDevice
from netdev.logger import logger
import asyncio
import re
class AlcatelAOS(BaseDevice):
    """Class for working with Alcatel AOS"""

    async def _read_until_prompt_or_pattern(self, pattern="", re_flags=0):
        """Read until either self.base_pattern or pattern is detected. Return ALL data available"""
        output = ""
        logger.info("Host {}: Reading until prompt or pattern".format(self._host))
        # With no explicit pattern both searches below degenerate to the
        # base prompt pattern.
        if not pattern:
            pattern = self._base_pattern
        base_prompt_pattern = self._base_pattern
        while True:
            # Read the next chunk from the channel, bounded by the session
            # timeout; re-raise asyncio's timeout as the generic TimeoutError
            # (carrying the host name) that callers expect.
            fut = self._stdout.read(self._MAX_BUFFER)
            try:
                output += await asyncio.wait_for(fut, self._timeout)
            except asyncio.TimeoutError:
                raise TimeoutError(self._host)
            # Patterns are anchored after a newline so a prompt-like string
            # in the middle of a line does not end the read prematurely.
            if re.search("\n" + pattern, output, flags=re_flags) or re.search(
                "\n" + base_prompt_pattern, output, flags=re_flags
            ):
                logger.debug(
                    "Host {}: Reading pattern '{}' or '{}' was found: {}".format(
                        self._host, pattern, base_prompt_pattern, repr(output)
                    )
                )
                return output
| [
"ericorain@hotmail.com"
] | ericorain@hotmail.com |
ed0466956305c5f5e6955a737d43b2039c8f0fc5 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/4-functional-programming/7-list-comprehension_20200422222427.py | 81d606e197ec10031073a3db9b3879a25cb59bc1 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #list, set, dicitonary
my_list = []
for char in 'HELLO':
my_list.append(char)
print(my_list)
dict_list = [char for char in 'good morning']
print(dict_list)
num_list = [num for num in range (0, 100)]
print(num_list)
print("divide by 3 with no remainder")
num_list3 = [num for num in range (0, 100) if(num%3 ==0)]
print(num_list3) | [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
2ab1dae99d3aab2a0201b1a098c67d8853ed4631 | 28a681ed25b767620f0a21580ddd4e057ccfed98 | /gate_camera.py | d5cfac49697c354726f074ca03b4e4e6b53d200c | [
"MIT"
] | permissive | sid1689/parking_lot_gate_final | 761cb4d473876e92a6ff9527263f4a7249d0d306 | d097c811aee706f23f1f1c83d130aa8e6654c7a8 | refs/heads/main | 2023-08-01T20:56:59.795965 | 2021-10-02T17:01:52 | 2021-10-02T17:01:52 | 412,719,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from parking_camera import ParkingCamera
class GateCamera(ParkingCamera):
    """
    Represents a camera at a gate.
    """
    def __init__(self, camera_address):
        super().__init__(camera_address)

    @property
    def _gate_open(self):
        # The gate counts as open exactly while picture taking is disabled.
        return not self._can_take_picture

    @_gate_open.setter
    def _gate_open(self, is_open):
        # Opening the gate suspends picture taking (and vice versa), then
        # announce the new state on stdout.
        self._can_take_picture = not is_open
        if is_open:
            print("Cancela aberta.")
        else:
            print("Cancela fechada.")

    def _handle_input(self):
        super()._handle_input()
        # 'c' closes the gate, but only when it is currently open.
        if self._key == ord('c') and self._gate_open:
            self._gate_open = False
| [
"sid_artaalmeida@hotmail.com"
] | sid_artaalmeida@hotmail.com |
1d97790987ac8d1d3091fc2896c5f47f49471e9b | e24da5964925d5b32f76b0aa4d671018b4a2c5be | /cride/circles/models/memberships.py | 467e5178d49feb9277b9f8b4780d46490ada2345 | [
"MIT"
] | permissive | valot3/Cride-API | 9b50db986cd27b42b22679b2d1a806653b7e4348 | a9e201942e6eecd479f575733e93ff73e6df573d | refs/heads/master | 2023-08-20T12:53:27.220691 | 2021-09-14T23:34:15 | 2021-09-14T23:34:15 | 406,551,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | """Membership model."""
#Django
from django.db import models
#Project
from cride.utils.models import CRideModel
class Membership(CRideModel):
    """Membership model.
    A membership model is the table that holds the relationship between a user and a circle.
    """
    user = models.ForeignKey('users.User', on_delete=models.CASCADE)
    profile = models.ForeignKey('users.Profile', on_delete=models.CASCADE)
    circle = models.ForeignKey('circles.Circle', on_delete=models.CASCADE)

    is_admin = models.BooleanField(
        'circle admin',
        default=False,
        help_text = 'Circle admin can update the circle\'s data and manage its members.'
    )

    # Invitation bookkeeping: how many invitations this member has spent /
    # still has, and who brought them in (kept even if the inviter's user
    # row is deleted, via SET_NULL).
    used_invitations = models.PositiveSmallIntegerField(default=0)
    remaining_invitations = models.PositiveSmallIntegerField(default=0)
    invited_by = models.ForeignKey(
        'users.User',
        null = True,
        on_delete = models.SET_NULL,
        related_name = 'invited_by'
    )

    # Stats: ride counters for this member within the circle.
    rides_taken = models.PositiveSmallIntegerField(default=0)
    rides_offered = models.PositiveSmallIntegerField(default=0)

    # Status
    is_active = models.BooleanField(
        'active status',
        default = True,
        help_text = 'Only active users are allowed to interact in the circle.'
    )

    def __str__(self):
        """Return username and circle."""
        return '@{} at #{}'.format(
            self.user.username,
            self.circle.slug_name
        )
| [
"valen.blanco.2004@hotmail.com"
] | valen.blanco.2004@hotmail.com |
7d19f4be3e65d55621b576d2306fd4eb58e60381 | 015ce35e6344d1726173594ae509dfc1ca6f856d | /2-basics/Study basics/loops.py | 7ce93a431733e32e961c8476c4ae0d1bd2085bee | [] | no_license | ayman-elkassas/Python-Notebooks | 4af80df75c15a6ac3049450b3920d500fef0e581 | 26a8265f458c40ac22965d55722f32a650851683 | refs/heads/master | 2023-04-03T19:12:17.707673 | 2021-04-10T21:32:37 | 2021-04-10T21:32:37 | 356,699,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | count=5
while count>99:
print("yes")
count-=1
else:
print("if")
for letter in "python":
print(letter)
| [
"aymanelkassas88@gmail.com"
] | aymanelkassas88@gmail.com |
dfdfdc73b69afa83125300340f0252cfe3100d38 | a127d0feb3bcf4f2581f385bb24f2b789c771c9c | /10syo/95_2.py | 0a1ea7e35fd38d9a0daad78a923622656306fdf5 | [] | no_license | NgoVanDau/nlp100knock | 01383e4cc5a1470508744668103b9ea1a238b892 | 3ef63c0d2dfb55c0e6a31aced645f284325a98a5 | refs/heads/master | 2023-03-22T13:19:23.932429 | 2018-08-05T05:27:11 | 2018-08-05T05:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | fname_input = 'combined_out.tab'
class Data:
def __init__(self, human_score, my_score):
self.human_score = human_score
self.my_score = my_score
def __repr__(self):
return 'Data%s' % repr(self.__dict__)
# データ配列作成
with open(fname_input) as data_file:
def read_data():
for line in data_file:
word1, word2, human_score, my_score = line.split('\t')
yield Data(float(human_score), float(my_score))
data = list(read_data())
# 順位付け
data_sorted_by_human_score = sorted(data, key=lambda data: data.human_score)
for order, d in enumerate(data_sorted_by_human_score):
d.human_order = order
data_sorted_by_my_score = sorted(data, key=lambda data: data.my_score)
for order, d in enumerate(data_sorted_by_my_score):
d.my_order = order
# スピアマン相関係数算出
N = len(data)
total = sum((d.human_order - d.my_order) ** 2 for d in data)
result = 1 - (6 * total) / (N ** 3 - N)
print(result)
| [
"kota.k.1132.pda@gmail.com"
] | kota.k.1132.pda@gmail.com |
220a336a1a4b55acfe67c77f5f2cae1589985e1e | 830225393fad2b53d1592a87973074a9c5cced02 | /build/rbcar_common/rbcar_description/catkin_generated/pkg.installspace.context.pc.py | 729bbf05e9ea062319bef7ae2bfa82f0ecf55f75 | [] | no_license | gtziafas/rbcar_workspace | 24e69c30a62b919d78ca88d04dedff25209893b1 | c64e2c4a19df800b28826a62133d8fd0da3674c0 | refs/heads/master | 2021-06-09T15:53:53.751753 | 2021-05-31T12:29:27 | 2021-05-31T12:29:27 | 131,979,435 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context: the values are substituted by
# CMake from pkg.context.pc.in at configure time -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rbcar_description"
PROJECT_SPACE_DIR = "/home/ggtz/rbcar_ws/install"
PROJECT_VERSION = "1.0.5"
| [
"noreply@github.com"
] | noreply@github.com |
bad677f019fc4d7992e5229966f842dcccd1035b | 028884b062038e1b948f879a7c62664b5aeccdfc | /python_ZED_VA/positional_tracking_to_CSVfile.py | 37597a1c0ffe49c64e6c30d2cab72f82c59289ad | [] | no_license | VirginiaPan/BeachMapping | d3728437297f9aedcf0b60fe247fb10c03bcd242 | 9a46859593ccdca7ea59ff0602eb39cbb109769c | refs/heads/master | 2020-06-08T04:06:29.625552 | 2019-07-11T18:00:13 | 2019-07-11T18:00:13 | 193,154,434 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py |
import pyzed.sl as sl
def main():
    """Open a ZED camera, track its pose for 1000 grabbed frames and append
    each pose (translation + orientation quaternion, rounded to 3 decimals)
    to /media/nvidia/SD1/position.csv."""
    # Create a Camera object
    zed = sl.Camera()
    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720 # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER # Set units in meters
    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
    # Enable positional tracking with default parameters
    py_transform = sl.Transform() # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    #added!
    path = '/media/nvidia/SD1/position.csv'
    position_file = open(path,'w')
    # NOTE(review): i only advances on successful grabs, so this loop can
    # spin indefinitely if frames stop arriving -- confirm intended.
    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            zed.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)
            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            position_file.write("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))
            i = i + 1
    # Close the camera
    zed.close()
    # Close file
    position_file.close()
if __name__ == "__main__":
main()
| [
"vgp4@duke.edu"
] | vgp4@duke.edu |
80e6680172774c8afdd21f6cabff10d713655524 | bbed39c1514e1f8014aa68a3ac59233c726de48a | /cnn_v2.py | 3ef86cdf73ee0f955ddb5a250259df46e48fc17e | [] | no_license | GodzSom/few-shot-segmentation | 37a04ebfe95f04c75e107c1586a4795763b144fa | 7f8e404a1da337e1bccee6cc2f278c998c7b27f7 | refs/heads/master | 2023-04-10T12:29:47.584839 | 2021-04-19T08:47:50 | 2021-04-19T08:47:50 | 281,317,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,128 | py | import torch, os, torchvision
import torch.nn.functional as F
from torch.utils.data import TensorDataset
from PIL import Image
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from stylegan import z_sample
from nethook import InstrumentedModel
from dissect import collect_stack, stacked_map_new#GeneratorSegRunner,
from CNN_models import dilated_CNN_101_up as CNN_net
# from unet_model import UNet as CNN_net
from kmeans import kmean_viz
from torchvision.utils import save_image
# Number of segmentation classes predicted by the CNN head (its output channels).
n_class = 6
# Square spatial resolution (pixels) shared by collected features and labels.
img_size = 128
def cnn(outdir, test_file, fname, model, raw_noise, given_seg=None, eva_im=None, eva_seg=None):
    """Train a per-pixel segmentation CNN head on feature maps collected
    from an instrumented generator (see the stylegan/dissect imports).

    outdir    -- directory for TensorBoard logs and the saved state dict
    test_file -- directory created alongside outdir (only written to by the
                 commented-out visualisation code below)
    fname     -- checkpoint file name saved inside outdir every epoch
    model     -- instrumented generator handed to collect_stack()
    raw_noise -- latent codes; wrapped in a DataLoader below
    given_seg -- one-hot ground-truth segmentation, reshaped to
                 (batch, img_size*img_size, n_class)
    eva_im, eva_seg -- accepted but currently unused evaluation inputs
    """
    pt = os.path.join(outdir, fname)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
        os.makedirs(test_file)
        # os.makedirs('./results/img/'+args.model_n+'/train/')
    writer = SummaryWriter(outdir)
    # Feature collection needs no gradients: only the CNN head is trained.
    with torch.no_grad():
        noise_dataset = torch.utils.data.DataLoader(raw_noise,
                    batch_size=1, num_workers=0, pin_memory=False)
        # Seg #
        seg_flat = np.reshape(given_seg, (-1, img_size*img_size, n_class))#[batch, 512*512, 4]
        stack = collect_stack(img_size, model, noise_dataset)[0] #[batch, total_c, h, w]
        num_chan = stack.shape[0]
        # 3008 is presumably the total number of stacked generator feature
        # channels -- TODO confirm against collect_stack().
        stack = np.reshape(stack, (-1, 3008, img_size, img_size))
    # Convert one-hot targets to flat class-index targets for NLLLoss.
    seg_flat = torch.LongTensor(seg_flat)
    _,seg_flat = seg_flat.max(dim=2) #[batch, 512*512, 1]
    seg_flat = np.reshape(seg_flat, (-1, img_size*img_size)) #[batch, 512*512]
    batch_size = 1
    # print(stack.shape)
    # print(given_seg.shape)
    # assert False
    trainDataset = TensorDataset(torch.FloatTensor(stack), torch.LongTensor(seg_flat))
    trainLoader = torch.utils.data.DataLoader(dataset = trainDataset, batch_size=batch_size, shuffle=True, num_workers=10, pin_memory=False)
    lr_rate = 0.001
    iterations = 10000
    ## Model
    #reg_model = Feedforward(num_chan, 200).cuda()
    hidden_list = [2000]
    reg_model = CNN_net(n_class).cuda()
    ## Loss
    #criterion = FocalLoss().cuda()
    criterion = torch.nn.NLLLoss().cuda()
    optimizer = torch.optim.Adam(reg_model.parameters(), lr=lr_rate, weight_decay=0)
    decayRate = 0.96
    my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=decayRate)
    #for param in reg_model.parameters():
    #    print('Parameter shape = ', param.shape)
    #torch.autograd.set_detect_anomaly(True)
    for step in range(iterations):
        print('Epoch {} / {}'.format(step, iterations))
        total_loss = 0
        total_nll = 0
        total_tv = 0
        for (data, target) in trainLoader:
            optimizer.zero_grad()
            data = data.cuda()
            target = target.cuda()
            print('------')
            print(data.shape)
            print(target.shape)
            prediction_ori = reg_model(data)
            # Flatten the spatial dims: NLLLoss expects (N, C, positions).
            prediction = torch.reshape(prediction_ori, (-1,n_class,img_size*img_size))
            nll = criterion(prediction, target)
            #loss = weighted_binary_cross_entropy(prediction, target, weights=None)
            # Total-variation regulariser: penalises differences between
            # horizontally/vertically adjacent predictions for smoothness.
            tv = 1e-7 * (
                torch.sum(torch.abs(prediction_ori[:, :, :, :-1] - prediction_ori[:, :, :, 1:])) +
                torch.sum(torch.abs(prediction_ori[:, :, :-1, :] - prediction_ori[:, :, 1:, :])))
            loss = nll + tv
            total_loss += loss
            total_nll += nll
            total_tv += tv
            loss.backward()
            optimizer.step()
            # print(prediction[0])
        print('Batch_Loss: ', total_loss.item())
        # Decay every 50 epoch
        if step%50 == 0:# and step!=0 :
            my_lr_scheduler.step()
            for param_group in optimizer.param_groups:
                print('Learning rate = ', param_group['lr'])
        writer.add_scalar('training loss', total_loss.item()/batch_size, step)
        writer.add_scalar('nll', total_nll.item()/batch_size, step)
        writer.add_scalar('tv', total_tv.item()/batch_size, step)
        # Checkpoint every epoch, overwriting the same file.
        torch.save(reg_model.state_dict(), pt)
        # print('!!!'+str(torch.max(prediction_ori[0][0])))
        # print('!!!'+str(torch.max(prediction_ori)))
        # print('!!!'+str(torch.max(target)))
        # print('!!!'+str(torch.min(target)))
        # print(target)
        save_image(prediction_ori[0][0]/torch.max(prediction_ori[0][0]), './debug/img'+str(step)+'.png')
        # combined = stacked_map_new(stylegan_stack, reg_model)
        # k_im = kmean_viz(combined, 512)
        # Image.fromarray((k_im).astype(np.uint8)).resize([1024,1024]).save(test_file+"im_{:03d}.png".format(step), optimize=True, quality=80)
    # with torch.no_grad():
    #     pt = os.path.join(outdir, fname)
    #     torch.save(reg_model.state_dict(), pt)
def weighted_binary_cross_entropy(output, target, weights=None):
    """Binary cross-entropy with optional per-class weights.

    weights -- optional pair (w_negative, w_positive); when omitted both
               classes weigh 1. A small eps keeps log() away from log(0).
    Returns the negated mean of the weighted log-likelihood terms.
    """
    eps = 1e-10
    if weights is not None:
        assert len(weights) == 2
        w_pos, w_neg = weights[1], weights[0]
    else:
        w_pos = w_neg = 1
    log_p = torch.log(output + eps)
    log_not_p = torch.log(1 - output + eps)
    loss = w_pos * (target * log_p) + w_neg * ((1 - target) * log_not_p)
    return -(torch.mean(loss))
class MaxLoss(torch.nn.Module):
    """Margin-based loss pulling positive predictions toward a learnable
    anchor `eye_line` (init 1.0) and negatives toward `bg_line` (init 0.0)."""
    def __init__(self):
        super().__init__()
        self.eye_line = torch.nn.Parameter(torch.Tensor([1.]))
        self.bg_line = torch.nn.Parameter(torch.Tensor([0.]))

    @staticmethod
    def distance(x, y):
        # Row-wise L2 distance, kept as a column vector.
        return torch.norm(x - y, dim=1, keepdim=True)

    def forward(self, y_pred, target):
        margin = 0.2
        d_to_eye = self.distance(y_pred, self.eye_line)
        d_to_bg = self.distance(y_pred, self.bg_line)
        # Positives are penalised for being closer to bg than eye (and vice
        # versa), with a margin; `target` selects which term applies per row.
        pos_loss = F.relu(d_to_eye - d_to_bg + margin)
        neg_loss = F.relu(d_to_bg - d_to_eye + margin)
        loss = torch.mm(target.t(), pos_loss) + torch.mm((1 - target).t(), neg_loss)
        return loss / target.shape[0]
class ContrastiveLoss(torch.nn.Module):
    """Threshold hinge loss: positives are pushed above 0.6, negatives
    below 0.4; predictions inside their target band incur zero loss."""
    def __init__(self):
        super().__init__()

    def forward(self, y_pred, target):
        pos_thr, neg_thr = 0.6, 0.4
        # Penalty for positives below the upper threshold / negatives above
        # the lower one; `target` gates which penalty applies per row.
        pos_loss = F.relu(pos_thr - y_pred)
        neg_loss = F.relu(y_pred - neg_thr)
        total = torch.mm(target.t(), pos_loss) + torch.mm((1 - target).t(), neg_loss)
        return total / target.shape[0]
class FocalLoss(torch.nn.Module):
    """Focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    alpha  -- balancing factor applied to every element
    gamma  -- focusing exponent; down-weights easy examples
    logits -- if True, `inputs` are raw logits; otherwise probabilities
    reduce -- if True return the mean, otherwise per-element losses
    """
    def __init__(self, alpha=0.25, gamma=2.0, logits=False, reduce=True):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.logits = logits
        self.reduce = reduce

    def forward(self, inputs, targets):
        # FIX: use reduction='none' instead of the deprecated `reduce=False`
        # keyword, which newer PyTorch versions warn about.
        if self.logits:
            BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
        else:
            BCE_loss = F.binary_cross_entropy(inputs, targets, reduction='none')
        # pt is the model's probability for the true class; (1-pt)^gamma
        # focuses the loss on hard, misclassified examples.
        pt = torch.exp(-BCE_loss)
        F_loss = self.alpha * (1 - pt) ** self.gamma * BCE_loss
        if self.reduce:
            return torch.mean(F_loss)
        else:
            return F_loss
| [
"noreply@github.com"
] | noreply@github.com |
0064553534fc3e9fda5bdb33418d7c997c5bf69e | 4198532417feb21d023d7ba525a74555186b3e89 | /trailscraper/collection_utils.py | 6a4329c59d41b866968966a11c7760b32a2d22e9 | [
"Apache-2.0"
] | permissive | flosell/trailscraper | 375207753c668c78e166381e7e086d1adb950482 | b9edb1483ca8e9ce6ac8540a63334c37abd6242d | refs/heads/master | 2023-09-04T07:27:52.593647 | 2023-09-04T01:34:25 | 2023-09-04T01:40:20 | 112,079,872 | 726 | 32 | Apache-2.0 | 2023-09-11T01:57:25 | 2017-11-26T12:15:52 | Python | UTF-8 | Python | false | false | 305 | py | """Functions to help with collections"""
import collections
def consume(iterator):
    """Consume the whole iterator to trigger side effects; does not return anything"""
    # Drain every element, discarding the values; only the iterator's side
    # effects matter.
    for _ in iterator:
        pass
| [
"florian.sellmayr@gmail.com"
] | florian.sellmayr@gmail.com |
4ce16e2eaacf769eca66aff6d2b31bc0a779050b | 749fa5a348def37a142ba15b3665ff1525c50321 | /image_reseize.py | 3950f4384ad5affa810deba01159b28e57484366 | [] | no_license | rahmakaichi1/Object_Detection | 163bd5ed19fa6e8ae9704d093c216734142c80a8 | aecd6346cf5baf94e1ecac0c4df42d1c5a254d4e | refs/heads/master | 2022-10-31T04:25:18.713807 | 2019-08-23T12:19:23 | 2019-08-23T12:19:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py |
# Source folder of images to shrink; the os/PIL imports are assumed to come
# from earlier in the script/notebook (not visible here) -- confirm.
directory ="/content/Tensorflow_object_detector/workspace/training_OD/images/jpg"
for file_name in os.listdir(directory):
    print("Processing %s" % file_name)
    image = Image.open(os.path.join(directory, file_name))
    # Fixed target size; presumably matches the detector's expected input
    # resolution -- TODO confirm.
    new_dimensions = (224, 224)
    output = image.resize(new_dimensions, Image.ANTIALIAS)
    folder = '/content/Tensorflow_object_detector/workspace/training_OD/images/reseizedImages'
    if not os.path.exists(folder):
        os.makedirs(folder)
    output_file_name = os.path.join(folder, file_name)
    #output.save(output_file_name, "JPEG", quality = 95)
    output.save(output_file_name)
print("All done")
| [
"rahma.akaichi@ensi-uma.tn"
] | rahma.akaichi@ensi-uma.tn |
804e51dfde50f67856b882d399fdc55f06b50df1 | ef064aed21874b178aaac515a741c078aa0aaf53 | /tests/__init__.py | 4e8bb2e23aa1c148756a7e0133be24c53b528d63 | [
"MIT"
] | permissive | fotonauts/fwissr-python | 0649d970fdf807b74753a512eb75f1fa6cd4ddad | 4314aa53ca45b4534cd312f6343a88596b4416d4 | refs/heads/master | 2021-06-06T06:38:14.332173 | 2013-12-05T09:28:18 | 2013-12-05T09:28:18 | 13,952,600 | 0 | 0 | MIT | 2021-03-25T21:42:00 | 2013-10-29T10:20:28 | Python | UTF-8 | Python | false | false | 147 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

# Choose a unittest implementation: the stdlib module on Python >= 2.7,
# the `unittest2` backport on anything older.
if sys.version_info >= (2, 7):
    import unittest
else:
    import unittest2 as unittest
| [
"oct@fotonauts.com"
] | oct@fotonauts.com |
760838f1d87ce31ef4b3b11e4015e744328df7c6 | f265a8d47a3b9ff21dfbd3a14405f8021c51c692 | /DocumentFiltering/naivebayes.py | 3136d8e7a8636886480bca4462d4293be4e8d2e5 | [] | no_license | fengxianyun/ji_ti_zhi_hui | a0f7ca5c04daa6d605d7e785e8d07de63e46e73a | c4548ec106e398bdb574c181a19e6afba6450258 | refs/heads/master | 2021-03-12T20:05:13.633604 | 2015-09-30T01:57:45 | 2015-09-30T01:57:45 | 42,382,038 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 1,197 | py | #coding:gbk
'''
Created on 2015年9月2日
@author: fxy
'''
# Naive Bayes classifier
import docclass
class naivebayes(docclass.classifier):
    """Naive Bayes document classifier built on top of docclass.classifier."""

    def docprob(self, item, category):
        """Return P(item | category): the product of the weighted
        probabilities of every feature extracted from *item*."""
        features = self.getfeatures(item)
        # Multiply the probabilities of all features together
        # (the "naive" independence assumption).
        p = 1.0
        for feature in features:
            p *= self.weightedProb(feature, category, self.fprob)
        return p

    def prob(self, item, category):
        """Return the unnormalised P(category | item), i.e.
        P(item | category) * P(category).

        NOTE(review): written for Python 2 — `/` truncates if the counts
        are ints; confirm before porting to Python 3.
        """
        catprob = self.catcount(category) / self.totalcount()
        docprob = self.docprob(item, category)
        return docprob * catprob

    def classify(self, item, defalt=None):
        """Return the most probable category for *item*, or *defalt* when
        no category wins by the configured threshold (or none score > 0).

        The parameter name `defalt` is kept for caller compatibility.
        """
        probs = {}
        # Find the category with the highest probability.  `best` now starts
        # as None so an empty/zero-score category set returns the default
        # instead of raising NameError (previous behaviour).
        best = None
        best_prob = 0.0
        for category in self.categories():
            probs[category] = self.prob(item, category)
            if probs[category] > best_prob:
                best_prob = probs[category]
                best = category
        if best is None:
            return defalt
        # Make sure the winner exceeds every rival by threshold * runner-up,
        # otherwise fall back to the default answer.
        for category in probs:
            if category == best:
                continue
            if probs[category] * self.getThreshold(best) > probs[best]:
                return defalt
        return best
"937002565@qq.com"
] | 937002565@qq.com |
cbe0bee2021ee9e00108c613fe4a09506fe21021 | 5bad7ccd38b4ad78ed8ef09d84ca060b1686208e | /test/test_nutrient.py | a4fb8db63120a4015d342621eec2b3c5c16076ab | [
"Apache-2.0"
] | permissive | HallaApp/python-client | 6862ffbd7bdf3a0c2b8b255beb7f2a3f7004db0d | 326d796952cb99951f46bdf29c06798405dd78ce | refs/heads/master | 2023-03-22T19:16:02.251439 | 2021-03-17T21:49:18 | 2021-03-17T21:49:18 | 348,852,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | # coding: utf-8
"""
Halla I/O
<p><strong>Getting Started:</strong></p> <ol type=\"1\"> <li><p>Obtain Credentials (Please Contact Halla to Obtain Credentials):</p> <ol type=\"a\"> <li><p><strong>'serviceAccount'</strong>: Add your Service Account in the <strong>header</strong> for all API requests to the Halla services. This is used to track API usage for authorization, billing, etc.</p></li> <li><p><strong>'key'</strong>: Add your API Key to the <strong>query</strong> for all API requests to the Halla services. This is used as a first line of defense to authenticate API requests.</p></li> </ol></li> <li><p>Add Your Catalog:</p> <ol type=\"a\"> <li><p>Use the <strong>POST STORE</strong> route to create a virtual product catalog. Please add a minimum of 1 <strong>thousand products per store</strong>, each with a <strong>'primaryId'</strong> and <strong>'label'</strong>. This will trigger Halla to index the catalog, allowing for Recommendation, Substitution, and Search services within minutes.</p></li> </ol></li> <li><p>Get Recommendations:</p> <ol type=\"a\"> <li><p>Use the <strong>GET PRODUCTS</strong> route and set the strategy to <strong>'recommend'</strong>.</p></li> <li><p>Fill in the <strong>'storeId'</strong> query parameter to use a specific catalog.</p></li> <li><p>Provide <strong>one or more</strong> of the following query parameters:</p> <ol type=\"i\"> <li><p><strong>'productId'</strong>: Biases recommendations to be relevant for a specific product.</p></li> <li><p><strong>'cartProductIds'</strong>: Biases recommendations to be relevant for all products in the cart.</p></li> <li><p><strong>'consumerId'</strong>: Biases recommendations to be relevant for the consumer's previous browsing and past purchase history.</p></li> </ol></li> <li><p>If multiple inputs are given, the recommendations will be blended to best satisfy multiple constraints.</p></li> </ol></li> <li><p>Get Substitutions:</p> <ol type=\"a\"> <li><p>Use the <strong>GET PRODUCTS</strong> route and set the strategy to 
<strong>'substitute'</strong>.</p></li> <li><p>Fill in the <strong>'storeId'</strong> query parameter to use a specific catalog.</p></li> <li><p>Fill in the <strong>'productId'</strong> query parameter.</p></li> </ol></li> <li><p>Get Search Results:</p> <ol type=\"a\"> <li><p>Use the <strong>GET PRODUCTS</strong> route and set the strategy to <strong>'search'</strong>.</p></li> <li><p>Fill in the <strong>'storeId'</strong> query parameter to use a specific catalog.</p></li> <li><p>Fill in the <strong>'text'</strong> query parameter.</p></li> </ol></li> <li><p>Supercharge Performance with Purchases:</p> <ol type=\"a\"> <li><p>Use the <strong>POST ORDER</strong> route to add one or more transactions to our system. Transactions will be used to fine tune our models to provide a better experience for your shoppers. To enable advanced personalization, please provide the <strong>'consumerId'</strong> field.</p></li> </ol></li> </ol> <p><strong>Advanced Integration:</strong></p> <ul> <li><p>Integrate Multi-Tenant Capabilities:</p> <ul> <li><p>Ensure that store and product <strong>ids</strong> are <strong>globally unique</strong> across all tenants. 
If needed, tenant name can be appended to the id in question to guarantee uniqueness.</p></li> <li><p>Attach <strong>'brand'</strong> field to allow for better personalization at scale.</p></li> </ul></li> <li><p>Enable Real-Time Inventory:</p> <ul> <li><p>Integrate the <strong>POST STORE</strong> route into your inventory management solution and do one of the following:</p> <ul> <li><p>Call the <strong>POST STORE</strong> route at regular intervals to overwrite existing store data.</p></li> <li><p>Call the <strong>ADD / DELETE</strong> product from store routes to update the catalog upon changes and current availabilities.</p></li> </ul></li> </ul></li> <li><p>(BETA) Enable Advanced Filtering:</p> <ul> <li><p>To enable SNAP, Own-Brand, Sponsored Product and other custom filters, create multiple virtual stores for each real store location. Each virtual store should correspond to a subset of products to include in the filter. Store ids can be generated by prepending the filter identifier to your store id.</p></li> </ul></li> <li><p>(BETA) Run an A/B Test:</p> <ul> <li><p>Work with your Halla Support Rep to define the scope of your A/B test.</p></li> <li><p>Call the <strong>POST ORDER</strong> route to add purchases with which to evaluate.</p></li> <li><p>If you are <strong>tracking spend</strong> between test groups, then it is <strong>required</strong> to attach the <strong>'campaign'</strong> field in the request body of the order.</p></li> <li><p>If you are <strong>testing at the consumer level</strong>, then it is <strong>required</strong> to attach the <strong>'consumerId'</strong> field in the request body of the order.</p></li> </ul></li> <li><p>(BETA) Add Fulfillment Data:</p> <ul> <li><p>Call the <strong>POST ORDER</strong> route multiple times corresponding to when an order is placed and later fulfilled. 
Set the <strong>'code'</strong> attribute in each item to <strong>'purchased' or 'fulfilled'</strong> corresponding to the order status.</p></li> </ul></li> </ul>
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.nutrient import Nutrient
class TestNutrient(unittest.TestCase):
    """Unit-test stubs for the generated Nutrient model."""

    def setUp(self):
        # No per-test fixtures are required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testNutrient(self):
        """Smoke-test that a Nutrient model instance can be constructed."""
        model = swagger_client.models.nutrient.Nutrient()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"ospgospg@gmail.com"
] | ospgospg@gmail.com |
b25b89b0209466c2429a890c6abb637fb33ab2bb | 5c90661aedf5f830b672ad979c781c3a9f693e9f | /image/sample3.py | b91308ef7d15c24bcbfaf948dfd18a2e5d835135 | [] | no_license | Sylphy0052/PyGame_Sample | b3312ba0c731d46c002fc03e90e5612be03b7396 | 0cf971556b950e1b50014b473ebf8fbcae72c57a | refs/heads/master | 2020-04-14T19:33:12.073567 | 2019-03-17T08:49:27 | 2019-03-17T08:49:27 | 164,062,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | import pygame
from pygame.locals import *
import sys
SCREEN_SIZE = (640, 480)
def load_image(filename, colorkey=None):
    """Load an image file for pygame rendering.

    Parameters:
        filename: path of the image file to load.
        colorkey: colour to treat as transparent; pass -1 to sample the
            top-left pixel, or None (default) for no transparency.

    Returns:
        (surface, rect) tuple for the converted image.

    Raises:
        SystemExit: when pygame cannot load the file.
    """
    try:
        image = pygame.image.load(filename)
    except pygame.error as message:
        # Fixed: the original used Python 2 `except pygame.error, message`
        # syntax (invalid in Python 3) and `raise (SystemExit, message)`.
        print("Cannot load image: {}".format(filename))
        raise SystemExit(message)
    # Match the display's pixel format for faster blitting.
    image = image.convert()
    if colorkey is not None:
        if colorkey == -1:  # fixed: `is -1` relied on small-int interning
            colorkey = image.get_at((0, 0))
        image.set_colorkey(colorkey, RLEACCEL)
    return image, image.get_rect()
# Demo entry point: open a window and keep drawing the loaded plane sprite
# until the user closes the window.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode(SCREEN_SIZE)
    pygame.display.set_caption("Function of Image Load")
    # colorkey=-1 makes the sprite's top-left pixel colour transparent.
    planeImg, planeRect = load_image("plane.png", colorkey=-1)
    while True:
        screen.fill((0,0,0))  # clear to black each frame
        screen.blit(planeImg, (200,100))
        pygame.display.update()
        # Only the window-close event is handled; everything else is ignored.
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit()
| [
"ma17099@shibaura-it.ac.jp"
] | ma17099@shibaura-it.ac.jp |
c75f2a6a359a2b27092edb7aed9fe3b1166980a2 | 47e3f13ce4e42fc157db6580154acd3e9a7169d7 | /activejob/activejob/jobs/migrations/0001_initial.py | 87ebbdc0cf0ff645d03df89b5ce118469f0786c9 | [] | no_license | dreadkopp/activejob_bootstrap | d266c15565f1371cd9c271de093c9570e0511231 | 8bcdb73f1f95265a06a8e9c751113ccf0cce67eb | refs/heads/master | 2020-12-30T13:40:19.351153 | 2017-08-22T15:48:28 | 2017-08-22T15:48:28 | 91,242,018 | 0 | 1 | null | 2017-08-22T15:48:29 | 2017-05-14T12:30:55 | Python | UTF-8 | Python | false | false | 2,801 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-18 18:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration of the `jobs` app: creates the Company, Contact,
    # Job and Location tables, then links Contact to Location once the
    # Location table exists.  Auto-generated by Django — do not hand-edit
    # field definitions.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('status', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('mail', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('slug', models.SlugField()),
                ('location', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('profile', models.TextField()),
                ('perspective', models.TextField()),
                ('is_intern', models.BooleanField()),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobs.Company')),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobs.Contact')),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('city', models.CharField(max_length=100)),
                ('street', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('fax', models.CharField(max_length=100)),
                ('mail', models.EmailField(max_length=254)),
                ('gmaps_iframe_href', models.CharField(max_length=100)),
            ],
        ),
        # Added after Location is created so the FK target already exists.
        migrations.AddField(
            model_name='contact',
            name='location',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobs.Location'),
        ),
    ]
| [
"dm@xanadu.dd.activ-job.intra"
] | dm@xanadu.dd.activ-job.intra |
0b9a8f51986a637727d2c4861dd3a0598f490b2f | 1522508ace6f366e17f6df3f36b09cc4042757c7 | /src/main/webapp/python/faceAverage.py | febc5a42aa20f6c56cfb7659fad4bbea1c18d101 | [] | no_license | webstorage119/AverageFaceServer2.0 | 022a9d6faf9487e463d6d0a47fc31b50c0a0afb3 | 6e9a985a665edae92b98af1a5c2ca3b60ad33852 | refs/heads/master | 2021-06-07T21:11:16.326668 | 2016-09-27T14:15:32 | 2016-09-27T14:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,185 | py | #!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2016 Satya Mallick <spmallick@learnopencv.com>
# All rights reserved. No warranty, explicit or implicit, provided.
import os
import sys
import cv2
import dlib
import numpy as np
import math
from skimage import io
import time
path = sys.argv[1]
# Read points from text files in directory
def readPoints(path):
    """Collect landmark points from every ``.txt`` file inside *path*.

    Each text file holds one "x y" integer pair per line.  The result is a
    list containing, for each file, a list of (x, y) integer tuples.
    """
    all_points = []
    for entry in os.listdir(path):
        # Only landmark text files are of interest.
        if not entry.endswith(".txt"):
            continue
        file_points = []
        with open(os.path.join(path, entry)) as handle:
            for line in handle:
                x, y = line.split()
                file_points.append((int(x), int(y)))
        all_points.append(file_points)
    return all_points
# Read all jpg images in folder.
def readImages(path):
    """Load every ``.jpg``/``.png`` in *path* as a float32 image in [0, 1]."""
    loaded = []
    for entry in os.listdir(path):
        # Skip anything that is not an image file.
        if entry.endswith((".jpg", ".png")):
            raw = cv2.imread(os.path.join(path, entry))
            # Convert 8-bit pixel values to floating point in [0, 1].
            loaded.append(np.float32(raw) / 255.0)
    return loaded
# Compute similarity transform given two sets of two points.
# OpenCV requires 3 pairs of corresponding points.
# We are faking the third one.
def similarityTransform(inPoints, outPoints):
    """Estimate a similarity transform mapping *inPoints* onto *outPoints*.

    OpenCV's estimateRigidTransform needs three point correspondences but
    only two are given (the eye corners), so a third point is synthesised
    for each pair by rotating the segment by 60 degrees — i.e. the apex of
    an equilateral triangle built on the two points.
    """
    s60 = math.sin(60 * math.pi / 180);
    c60 = math.cos(60 * math.pi / 180);

    inPts = np.copy(inPoints).tolist();
    outPts = np.copy(outPoints).tolist();

    # Third vertex for the input pair (rotate point 0 around point 1 by 60°).
    xin = c60 * (inPts[0][0] - inPts[1][0]) - s60 * (inPts[0][1] - inPts[1][1]) + inPts[1][0];
    yin = s60 * (inPts[0][0] - inPts[1][0]) + c60 * (inPts[0][1] - inPts[1][1]) + inPts[1][1];

    inPts.append([np.int(xin), np.int(yin)]);

    # Matching third vertex for the output pair.
    xout = c60 * (outPts[0][0] - outPts[1][0]) - s60 * (outPts[0][1] - outPts[1][1]) + outPts[1][0];
    yout = s60 * (outPts[0][0] - outPts[1][0]) + c60 * (outPts[0][1] - outPts[1][1]) + outPts[1][1];

    outPts.append([np.int(xout), np.int(yout)]);

    # False -> full affine is not allowed; restrict to a rigid/similarity map.
    tform = cv2.estimateRigidTransform(np.array([inPts]), np.array([outPts]), False);

    return tform;
# Check if a point is inside a rectangle
def rectContains(rect, point):
    """Return True when *point* (x, y) lies within *rect*.

    *rect* is interpreted as (min_x, min_y, max_x, max_y); both boundaries
    are inclusive, matching the original chain of comparisons.
    """
    x, y = point[0], point[1]
    return rect[0] <= x <= rect[2] and rect[1] <= y <= rect[3]
# Calculate delanauy triangle
def calculateDelaunayTriangles(rect, points):
    """Delaunay-triangulate *points* inside *rect* and return index triples.

    Each returned tuple (i, j, k) indexes into *points*; triangles with any
    vertex outside *rect* are dropped.  NOTE: uses `xrange`, so this file
    targets Python 2.
    """
    # Create subdiv
    subdiv = cv2.Subdiv2D(rect);

    # Insert points into subdiv
    for p in points:
        subdiv.insert((p[0], p[1]));

    # List of triangles. Each triangle is a list of 3 points ( 6 numbers )
    triangleList = subdiv.getTriangleList();

    # Find the indices of triangles in the points array
    delaunayTri = []

    for t in triangleList:
        pt = []
        pt.append((t[0], t[1]))
        pt.append((t[2], t[3]))
        pt.append((t[4], t[5]))

        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])

        if rectContains(rect, pt1) and rectContains(rect, pt2) and rectContains(rect, pt3):
            ind = []
            # Match each triangle vertex back to its index in `points`,
            # tolerating < 1px of floating-point drift from Subdiv2D.
            for j in xrange(0, 3):
                for k in xrange(0, len(points)):
                    if (abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0):
                        ind.append(k)
            # Keep only triangles whose three vertices were all matched.
            if len(ind) == 3:
                delaunayTri.append((ind[0], ind[1], ind[2]))

    return delaunayTri
def constrainPoint(p, w, h):
    """Clamp point *p* = (x, y) into the rectangle [0, w-1] x [0, h-1]."""
    clamped_x = min(max(p[0], 0), w - 1)
    clamped_y = min(max(p[1], 0), h - 1)
    return (clamped_x, clamped_y)
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def applyAffineTransform(src, srcTri, dstTri, size):
    """Warp *src* with the affine map taking *srcTri* onto *dstTri*.

    *size* is the (width, height) of the output patch.
    """
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))

    # Apply the Affine Transform just found to the src image.
    # BORDER_REFLECT_101 avoids dark seams at patch edges.
    dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)

    return dst
# Warps and alpha blends triangular regions from img1 and img2 to img
def warpTriangle(img1, img2, t1, t2):
    """Warp triangle *t1* of *img1* onto triangle *t2* of *img2* (in place).

    Only the pixels inside the destination triangle are blended into *img2*;
    NOTE: uses `xrange` (Python 2).
    """
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))

    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    t2Rect = []
    t2RectInt = []

    for i in xrange(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))

    # Get mask by filling triangle (1.0 inside the triangle, 0.0 outside)
    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0);

    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]

    size = (r2[2], r2[3])

    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)

    img2Rect = img2Rect * mask

    # Copy triangular region of the rectangular patch to the output image:
    # zero out the destination triangle, then add the warped patch.
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * (
        (1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect
# The original program read the trained-model path from sys.argv; this
# trimmed-down version hard-codes the path instead.
predictor_path = "./shape_predictor_68_face_landmarks.dat"
# As with face detection, use dlib's bundled frontal_face_detector.
detector = dlib.get_frontal_face_detector()
# Build the landmark extractor from the officially provided model file.
predictor = dlib.shape_predictor(predictor_path)
# get_landmarks() converts an image into a numpy matrix of shape 68x2:
# one (x, y) coordinate per facial landmark of the input image.
def get_landmarks(im):
    # Only the first detected face (rects[0]) is used; raises IndexError
    # when no face is found.
    rects = detector(im, 1)
    return np.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
# Main pipeline (Python 2: note `print child` and `xrange` below):
# 1) generate missing landmark files, 2) normalise all faces to a common
# eye-corner frame, 3) average landmarks, 4) warp every face onto the
# average landmarks triangle-by-triangle and blend.
if __name__ == '__main__':
    # path = 'input/ruby/'
    # Read the images in the folder, compute each image's landmarks and
    # save them as a .txt file (skipped when the .txt already exists).
    pathDir = os.listdir(path)
    for item in pathDir:
        child = os.path.join('%s%s' % (path, item))
        if child.endswith('.jpg') or child.endswith('.png'):
            text_name = path + os.path.splitext(item)[0] + '.txt'
            if not os.path.exists(text_name):
                img = io.imread(child)
                result = get_landmarks(img)
                np.savetxt(text_name, result, fmt="%d %d")
                print child

    # Dimensions of output image
    w = 440;
    h = 586;

    # Read points for all images
    allPoints = readPoints(path);

    # Read all images
    images = readImages(path);

    # Eye corners: target positions of the two outer eye corners in the
    # output frame (30% / 70% of the width, one third down the image).
    eyecornerDst = [(np.int(0.3 * w), np.int(h / 3)), (np.int(0.7 * w), np.int(h / 3))];

    imagesNorm = [];
    pointsNorm = [];

    # Add boundary points for delaunay triangulation
    boundaryPts = np.array(
        [(0, 0), (w / 2, 0), (w - 1, 0), (w - 1, h / 2), (w - 1, h - 1), (w / 2, h - 1), (0, h - 1), (0, h / 2)]);

    # Initialize location of average points to 0s
    pointsAvg = np.array([(0, 0)] * (len(allPoints[0]) + len(boundaryPts)), np.float32());

    n = len(allPoints[0]);

    numImages = len(images)

    # Warp images and trasnform landmarks to output coordinate system,
    # and find average of transformed landmarks.
    for i in xrange(0, numImages):
        points1 = allPoints[i];

        # Corners of the eye in input image (dlib landmark ids 36 and 45).
        eyecornerSrc = [allPoints[i][36], allPoints[i][45]];

        # Compute similarity transform
        tform = similarityTransform(eyecornerSrc, eyecornerDst);

        # Apply similarity transformation
        img = cv2.warpAffine(images[i], tform, (w, h));

        # Apply similarity transform on points
        points2 = np.reshape(np.array(points1), (68, 1, 2));
        points = cv2.transform(points2, tform);
        points = np.float32(np.reshape(points, (68, 2)));

        # Append boundary points. Will be used in Delaunay Triangulation
        points = np.append(points, boundaryPts, axis=0)

        # Calculate location of average landmark points.
        pointsAvg = pointsAvg + points / numImages;

        pointsNorm.append(points);
        imagesNorm.append(img);

    # Delaunay triangulation
    rect = (0, 0, w, h);
    dt = calculateDelaunayTriangles(rect, np.array(pointsAvg));

    # Output image
    output = np.zeros((h, w, 3), np.float32());

    # Warp input images to average image landmarks
    for i in xrange(0, len(imagesNorm)):
        img = np.zeros((h, w, 3), np.float32());
        # Transform triangles one by one
        for j in xrange(0, len(dt)):
            tin = [];
            tout = [];

            for k in xrange(0, 3):
                pIn = pointsNorm[i][dt[j][k]];
                pIn = constrainPoint(pIn, w, h);

                pOut = pointsAvg[dt[j][k]];
                pOut = constrainPoint(pOut, w, h);

                tin.append(pIn);
                tout.append(pOut);

            warpTriangle(imagesNorm[i], img, tin, tout);

        # Add image intensities for averaging
        output = output + img;

    # Divide by numImages to get average
    output = output / numImages;

    img_path = 'output/'
    # img_name = time.strftime("%Y%m%d-%H%M%S") + '.jpg'
    img_name = 'lalala_python.jpg'
    print(img_name)
    # Scale from [0, 1] floats back to 8-bit for writing.
    cv2.imwrite(img_path + img_name, output * 255)

    # Display result
    cv2.startWindowThread()
    cv2.namedWindow("image", 1)
    cv2.imshow('image', output);
    cv2.waitKey(0);
| [
"simoncherry@sina.com"
] | simoncherry@sina.com |
bdd9bde6ac4d525d029a7f01e0a25d1437f68c1d | a5e50ee7c94feae560ac169064f9af67197071c7 | /Chatroom/chat/routing.py | 2d68790ba0f1bfadb80d827cbca5a93e2e12a469 | [] | no_license | AmaanNaikwadi/Public-Chatroom1 | 0683bb314459f89e03ab426dbc46ec402c49b183 | 1d44de92923be2c6715cec5b5d1c9a821d9928f8 | refs/heads/master | 2023-06-22T08:30:41.843079 | 2021-07-24T18:06:06 | 2021-07-24T18:06:06 | 383,660,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | from django.urls import re_path
from . import consumers
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
# ASGI websocket routes for the chat application.
websocket_urlpatterns = [
    re_path(r'ws/chat/', consumers.ChatRoomConsumer.as_asgi()),
    # NOTE(review): the next two patterns are identical apart from the
    # capture-group name, so the `username` route can never match — every
    # `ws/<word>/` URL is claimed by the `group_name` route first.
    # Confirm the intended dispatch between group and direct chats.
    re_path(r'ws/(?P<group_name>\w+)/$', consumers.GroupChatConsumer.as_asgi()),
    re_path(r'ws/(?P<username>\w+)/$', consumers.ChatConsumer.as_asgi()),
]
| [
"amaannaikwadi@gmail.com"
] | amaannaikwadi@gmail.com |
6ff08466a1384f6af041270b3190c0fffa2b4e7f | f0801ad1a4e4097a7026cca8767e88fe74036ea7 | /main/migrations/backup/0003_auto_20170218_0804.py | a79bd7966d057587a348b1ba3e8ac2458e8413f0 | [] | no_license | meadhikari/django-crm | 4ca446e020f07c50286a4d6debb5ecbf275abb39 | 944319ed0ead8aa1911b8ba2d66b390411972f35 | refs/heads/master | 2021-01-19T13:42:17.014964 | 2017-02-18T19:32:39 | 2017-02-18T19:32:41 | 82,411,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 08:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two choice fields to Customer and reworks the choices of the
    # existing `chetra` field.  Auto-generated by Django; the verbose_name
    # and choice labels are escaped Devanagari (Nepali) strings — do not
    # hand-edit them.

    dependencies = [
        ('main', '0002_auto_20170218_0748'),
    ]

    operations = [
        migrations.AddField(
            model_name='customer',
            name='batoko_kisim',
            field=models.CharField(choices=[(b'kacchi', '\u0915\u091a\u094d\u091a\u0940'), (b'sahayek', '\u0938\u093e\u0939\u092f\u0947\u0915'), (b'pichbato', '\u092a\u0940\u091a\u092c\u093e\u091f\u094b'), (b'mukhya_pichbato', '\u092e\u0941\u0916\u094d\u092f \u092a\u093f\u091a\u092c\u093e\u091f\u094b')], default=None, max_length=100, verbose_name=b'\xe0\xa4\xac\xe0\xa4\xbe\xe0\xa4\x9f\xe0\xa5\x8b \xe0\xa4\x95\xe0\xa5\x8b \xe0\xa4\x95\xe0\xa4\xbf\xe0\xa4\xb8\xe0\xa4\xbf\xe0\xa4\xae'),
        ),
        migrations.AddField(
            model_name='customer',
            name='ghar_ko_kisim',
            field=models.CharField(choices=[(b'kacchi', '\u0915\u091a\u094d\u091a\u0940'), (b'pakki', '\u092a\u0915\u094d\u0915\u093f')], default=None, max_length=100, verbose_name=b'\xe0\xa4\x98\xe0\xa4\xb0 \xe0\xa4\x95\xe0\xa5\x8b \xe0\xa4\x95\xe0\xa4\xbf\xe0\xa4\xb8\xe0\xa4\xbf\xe0\xa4\xae'),
        ),
        migrations.AlterField(
            model_name='customer',
            name='chetra',
            field=models.CharField(choices=[(b'awasiya', '\u0906\u0935\u093e\u0938\u0940\u092f'), (b'angsik_bajar', '\u0905\u0928\u094d\u0917\u094d\u0938\u093f\u0915 \u092c\u091c\u093e\u0930'), (b'bajar', '\u092c\u091c\u093e\u0930'), (b'mukhya_bajar', '\u092e\u0941\u0916\u094d\u092f \u092c\u091c\u093e\u0930')], default=None, max_length=100, verbose_name=b'\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xb7\xe0\xa5\x87\xe0\xa4\xa4\xe0\xa5\x8d\xe0\xa4\xb0 '),
        ),
    ]
| [
"salik.adhikari@gmail.com"
] | salik.adhikari@gmail.com |
280d6693da1ac1395f21a1816d99169a91c8a6d6 | f653dcf96b79a0b43650a1fb00146c8089c46e02 | /23.py | a2c9eb2011fe8e74e9f8d01133962b8f94d7174c | [] | no_license | nishtha-rokde/DSA | 7877e72bc562902555df33d00f67f6f69d1bcbfa | 38741c121ef9a6731a53b0023327e44bc8e3e3f3 | refs/heads/main | 2023-01-01T13:43:13.544150 | 2020-10-24T17:05:31 | 2020-10-24T17:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | def arr_sum(arr,sum):
def arr_sum(arr, sum):
    """Find all pairs of elements in *arr* adding up to *sum*.

    Uses the classic sorted two-pointer scan.  Returns (pairs, count):
    `pairs` is a list of [smaller, larger] pairs, `count` is len(pairs).

    Fixes over the original: no longer sorts the caller's list in place,
    no longer shadows the builtin `list`, and the pair total is computed
    once per iteration.  The parameter name `sum` is kept for caller
    compatibility even though it shadows the builtin.
    """
    items = sorted(arr)  # work on a sorted copy; caller's list is untouched
    start = 0
    end = len(items) - 1
    pairs = []
    count = 0
    while start < end:
        pair_total = items[start] + items[end]
        if pair_total < sum:
            start += 1
        elif pair_total > sum:
            end -= 1
        else:
            pairs.append([items[start], items[end]])
            count += 1
            start += 1
            end -= 1
    return pairs, count
print(arr_sum([1,5,7,-1],6))
| [
"nishtharokde@gmail.com"
] | nishtharokde@gmail.com |
6c3738a82dfea2e1e403e4977157e4892817fe62 | a459d1413a65f1bf8ed982342f4ba9a1921f4a0c | /students/views/journal.py | 6aac3af7e786afaa1544dd8e22ae507de936774d | [] | no_license | Igorisius/project_studentsdb | 94c75e1f7c15e6a60aa71e9acdb443b8ada1ecc2 | ee37fc03c601c6ce5ff68baea862ac8e812f3379 | refs/heads/master | 2020-12-30T09:38:18.072616 | 2015-04-05T21:15:32 | 2015-04-05T21:15:32 | 28,105,069 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
def journal_list(request):
    """Render the journal page with a fixed demo set of students."""
    demo_entries = (
        {'id': 1, 'last_name': u'Петро Білочка'},
        {'id': 2, 'last_name': u'Іван Іванович'},
        {'id': 3, 'last_name': u'Михайло Пилипенко'},
        {'id': 4, 'last_name': u'Хтоб Небув'},
    )
    context = {'journal': demo_entries}
    return render(request, 'students/journal_list.html', context)
def journal_update(request, sid):
    """Stub view: show a placeholder page for journal entry *sid*."""
    body = '<h1>Update Journal %s</h1>' % sid
    return HttpResponse(body)
| [
"Sakivskyy.igor@gmail.com"
] | Sakivskyy.igor@gmail.com |
c252ebbcecd170aa13eef5bf53383465b4098786 | b88c7f892b4ec97a1bfecc1ca15b4014f3d9257e | /nasbench_asr/training/tf/datasets/text_encoder.py | 98262f2021b219d8e35726114ba0fa17da044327 | [
"Apache-2.0"
] | permissive | akhauriyash/nb-asr | 66b0d1dcf5c769763bb2945c130e17756c523164 | 8889f37081ebbde253da1589d13fe3bc9ccd9ef8 | refs/heads/main | 2023-06-23T05:20:41.390868 | 2021-07-22T20:50:51 | 2021-07-22T20:50:51 | 388,593,693 | 0 | 0 | Apache-2.0 | 2021-07-22T20:50:18 | 2021-07-22T20:50:17 | null | UTF-8 | Python | false | false | 1,829 | py | # pylint: skip-file
from nasbench_asr.quiet_tensorflow import tensorflow as tf
from .phoneme_encoder import PhonemeEncoder
def get_utf8_valid_sentence(sentence):
    # Unwrap the tensor into a plain Python value — presumably a scalar
    # tf.string tensor whose .numpy() yields bytes; confirm against callers.
    return sentence.numpy()
def get_corpus_generator(ds):
    """Yield the raw sentence bytes for every (audio, sentence) pair in *ds*."""
    for _first, raw_sentence in ds:
        yield get_utf8_valid_sentence(raw_sentence)
def get_encoded_from_sentence_fn(encoder):
    """Build a tf.data-compatible mapper that encodes a sentence tensor
    into int32 token ids using *encoder*."""

    def get_encoded_from_sentence_helper(sentence):
        # the following [] are essential!
        # (the extra list nesting gives the result a leading dimension)
        encoded = [encoder.encode(get_utf8_valid_sentence(sentence))]
        return encoded

    def get_encoded_from_sentence(sentence):
        # the following [] are essential!
        # tf.py_function bridges the eager helper into graph execution.
        encoded = tf.py_function(get_encoded_from_sentence_helper, [sentence],
                                 tf.int32)
        return encoded

    return get_encoded_from_sentence
def get_decoded_from_encoded_fn(encoder):
    """Build the inverse mapper: int32 token ids back into a string tensor."""

    def get_decoded_from_encoded_helper(encoded):
        # the following [] are essential!
        decoded = [
            get_utf8_valid_sentence(
                tf.constant(encoder.decode(encoded.numpy().tolist())))
        ]
        return decoded

    def get_decoded_from_encoded(encoded):
        # the following [] are essential!
        decoded = tf.py_function(get_decoded_from_encoded_helper, [encoded],
                                 tf.string)
        return decoded

    return get_decoded_from_encoded
class TextEncoder:
    """Bundle a PhonemeEncoder with its tf.data encode/decode mapper
    functions.

    Only ``encoder_class == 'phoneme'`` is implemented; any other value
    raises ValueError.
    """

    def __init__(
            self,
            encoder_class,
    ):
        if encoder_class != 'phoneme':
            raise ValueError('Unsupported encoder type {!r}'.format(encoder_class))

        self.encoder_class = encoder_class
        self.encoder = PhonemeEncoder()
        # Pre-built mapper callables for use with tf.data pipelines.
        self.get_encoded_from_sentence = get_encoded_from_sentence_fn(self.encoder)
        self.get_decoded_from_encoded = get_decoded_from_encoded_fn(self.encoder)
| [
"l.dudziak@samsung.com"
] | l.dudziak@samsung.com |
2cd12e80087b56b034e20a935df8bd724ed19e13 | b277ca06cb0c33635e31928a3643c85f67623af4 | /buildenv/lib/python3.5/site-packages/sphinx/ext/mathjax.py | 7fb3b17ad61bc87381b6d2036ece930c89f04dce | [
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] | permissive | angrycaptain19/container-camp | a3e5c9b9f130776c842032148fcdba094bc0da8f | b0b14fe30aee310cb3775c1491d5b6304173936b | refs/heads/master | 2023-03-12T18:04:13.700249 | 2021-03-01T23:02:30 | 2021-03-01T23:02:30 | 343,728,529 | 0 | 0 | NOASSERTION | 2021-03-02T10:30:35 | 2021-03-02T10:07:11 | null | UTF-8 | Python | false | false | 3,258 | py | # -*- coding: utf-8 -*-
"""
sphinx.ext.mathjax
~~~~~~~~~~~~~~~~~~
Allow `MathJax <http://mathjax.org/>`_ to be used to display math in
Sphinx's HTML writer -- requires the MathJax JavaScript library on your
webserver/computer.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
import sphinx
from sphinx.locale import _
from sphinx.errors import ExtensionError
from sphinx.ext.mathbase import setup_math as mathbase_setup
def html_visit_math(self, node):
    # Render inline math: wrap the raw LaTeX in the configured MathJax
    # inline delimiters inside a <span class="math">.
    self.body.append(self.starttag(node, 'span', '', CLASS='math'))
    self.body.append(self.builder.config.mathjax_inline[0] +
                     self.encode(node['latex']) +
                     self.builder.config.mathjax_inline[1] + '</span>')
    # SkipNode stops docutils from visiting the node's children.
    raise nodes.SkipNode
def html_visit_displaymath(self, node):
    # Render display (block) math inside a <div class="math">, optionally
    # numbered and wrapped in an align environment for multi-part formulas.
    self.body.append(self.starttag(node, 'div', CLASS='math'))
    if node['nowrap']:
        # nowrap: emit the LaTeX verbatim, without MathJax delimiters.
        self.body.append(self.encode(node['latex']))
        self.body.append('</div>')
        raise nodes.SkipNode

    # necessary to e.g. set the id property correctly
    if node['number']:
        self.body.append('<span class="eqno">(%s)' % node['number'])
        self.add_permalink_ref(node, _('Permalink to this equation'))
        self.body.append('</span>')

    self.body.append(self.builder.config.mathjax_display[0])
    # Blank lines split the LaTeX source into separate equations.
    parts = [prt for prt in node['latex'].split('\n\n') if prt.strip()]
    if len(parts) > 1:  # Add alignment if there are more than 1 equation
        self.body.append(r' \begin{align}\begin{aligned}')
    for i, part in enumerate(parts):
        part = self.encode(part)
        if r'\\' in part:
            # A part with its own line breaks keeps them inside a split block.
            self.body.append(r'\begin{split}' + part + r'\end{split}')
        else:
            self.body.append(part)
        if i < len(parts) - 1:  # append new line if not the last equation
            self.body.append(r'\\')
    if len(parts) > 1:  # Add alignment if there are more than 1 equation
        self.body.append(r'\end{aligned}\end{align} ')
    self.body.append(self.builder.config.mathjax_display[1])
    self.body.append('</div>\n')
    raise nodes.SkipNode
def builder_inited(app):
    # Fail fast if the extension is enabled without a script URL to load.
    if not app.config.mathjax_path:
        raise ExtensionError('mathjax_path config value must be set for the '
                             'mathjax extension to work')
    app.add_javascript(app.config.mathjax_path)
def setup(app):
    """Register the MathJax HTML math renderer and its config values."""
    try:
        mathbase_setup(app, (html_visit_math, None), (html_visit_displaymath, None))
    except ExtensionError:
        # mathbase refuses to install a second math renderer.
        raise ExtensionError('sphinx.ext.mathjax: other math package is already loaded')
    # more information for mathjax secure url is here:
    # http://docs.mathjax.org/en/latest/start.html#secure-access-to-the-cdn
    default_cdn = ('https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?'
                   'config=TeX-AMS-MML_HTMLorMML')
    app.add_config_value('mathjax_path', default_cdn, False)
    app.add_config_value('mathjax_inline', [r'\(', r'\)'], 'html')
    app.add_config_value('mathjax_display', [r'\[', r'\]'], 'html')
    app.connect('builder-inited', builder_inited)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| [
"mmsprinkle@gmail.com"
] | mmsprinkle@gmail.com |
0daf65ce49a311fdb34c04109af046ccda6c1f28 | 9a50b0a97c5caf12bcfc7fff8cee0b72326e2fc2 | /worksheet_12a.py | 70813069bc0a715121589f75df5b471280599008 | [] | no_license | seannydududu/schoolproject | e3e88dd695f7a2633201f6542ec0eab7c05508d5 | 4388e63a52fd452b01ae90e39990ad486c8419f4 | refs/heads/main | 2023-05-26T21:38:52.296951 | 2021-06-08T05:33:40 | 2021-06-08T05:33:40 | 374,891,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | #Question 1
def qn1():
    """Prompt for name, index number and class, then print an introduction."""
    name = input("Enter your name:")
    index_no = input("Enter your index number:")
    class_name = input("Enter your class:")
    intro = f"Hi, I am {name} from class {class_name} and my index number is {index_no}.\nNice to meet you."
    print(intro)
#Question 3
def qn3():
    """Print the 0-10 multiplication table for a user-supplied factor.

    Reads the factor from stdin; raises ValueError if the input is not
    a valid integer.
    """
    factor = int(input("Enter a multiplication factor:"))
    # for-range replaces the manual while-counter; the original also reset
    # a `total` accumulator every iteration, which was dead code.
    for i in range(11):
        print("{} x {} = {}".format(i, factor, i * factor))
#Question 5
def qn5():
    """Classify the user as GOOD or BAD based on whether their name
    contains the letter 'z'."""
    name = input("Enter your name: ")
    # NOTE(review): only lowercase 'z' is matched — confirm whether an
    # uppercase 'Z' should also count.
    verdict = "Your a BAD person." if 'z' in name else "Your a GOOD person."
    print(verdict)
#Question 6
def qn6():
    """Read a sentence and print it with every space removed."""
    sentence = input("Enter a sentence:")
    print(sentence.replace(" ", ""))
#Question 7
def qn7():
    """Reverse a word, by manual iteration or by slice indexing,
    depending on the user's menu choice."""
    word = input("Enter a word:")
    choice = input("a)iteration or b)indexing")
    if choice == "a":
        # Walk negative indices -1, -2, ... to pick letters back-to-front.
        reversed_word = ""
        for pos in range(1, len(word) + 1):
            reversed_word += word[-pos]
        print(reversed_word)
    if choice == 'b':
        print(word[::-1])
#Question 8
def qn8():
    """Read a phrase and print it in title case (each word capitalised)."""
    phrase = input("Enter a phrase")
    print(phrase.title())
#Question 9
def qn9():
    """Read a sentence and print its words in reverse order."""
    # The original kept an unused `word = ""` local; removed.
    sentence = input("Enter a sentence:")
    print(' '.join(reversed(sentence.split())))
| [
"progamer12323432@gmail.com"
] | progamer12323432@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.