Dataset schema (field name: type, observed range):
- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3-281
- content_id: string, length 40
- detected_licenses: list, length 0-57
- license_type: string, 2 classes
- repo_name: string, length 6-116
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 313 classes
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 18.2k-668M, nullable
- star_events_count: int64, 0-102k
- fork_events_count: int64, 0-38.2k
- gha_license_id: string, 17 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 107 classes
- src_encoding: string, 20 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 4-6.02M
- extension: string, 78 classes
- content: string, length 2-6.02M
- authors: list, length 1
- author: string, length 0-175

Each record below lists these fields in order, with `|` separating consecutive fields.
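A minimal sketch of how rows with this schema might be consumed, assuming the dump comes from a Hugging Face `datasets` export; the dataset path "org/stack-like-corpus" is a placeholder, not the real name:

# Hypothetical loader for a dataset with the schema above.
from datasets import load_dataset

ds = load_dataset("org/stack-like-corpus", split="train", streaming=True)  # placeholder path
for row in ds:
    # Provenance columns identify the repo and file; "content" is the raw source.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    source = row["content"]   # full file text (2 B - 6.02 MB)
    authors = row["authors"]  # list holding a single author string
    break  # just peek at the first record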
34698f6c132ed077c67d3a15f869d8d78bcefe61
|
3e3863e9eced23d646cd039a395b08ed6d1f3929
|
/training/medium/robbery-optimisation.py
|
ca3d8f40995da67cf5301d321958008d3857282b
|
[] |
no_license
|
Coni63/CG_repo
|
dd608bdbd2560598a72339d150ec003e6b688cac
|
d30e01dfe2a12e26c85799c82cf38e606ffdbc16
|
refs/heads/master
| 2020-06-25T04:03:54.260340
| 2019-10-20T16:16:40
| 2019-10-20T16:16:40
| 199,195,242
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = int(input())
housevalue = [int(input()) for i in range(n)]
n = len(housevalue)
pn = housevalue[0]
qn = 0
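# House-robber recurrence: pn is the best loot that takes house i,
# qn the best that skips it; two adjacent houses can never both be robbed.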
for i in range(1, n):
pn1 = pn
qn1 = qn
pn = qn1 + housevalue[i]
qn = max(pn1, qn1)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
print(max(pn, qn))
|
[
"="
] |
=
|
8eef75cab1181157c9944e567533b91f03ae8168
|
7f0c02b3eef636cc382484dd8015207c35cc83a8
|
/lib/python/treadmill/runtime/linux/image/_docker.py
|
fa24bbf1455842f1673f31d8a4867769d207bc30
|
[
"Apache-2.0"
] |
permissive
|
ceache/treadmill
|
4efa69482dafb990978bfdcb54b24c16ca5d1147
|
26a1f667fe272ff1762a558acfd66963494020ca
|
refs/heads/master
| 2021-01-12T12:44:13.474640
| 2019-08-20T23:22:37
| 2019-08-20T23:22:37
| 151,146,942
| 0
| 0
|
Apache-2.0
| 2018-10-01T19:31:51
| 2018-10-01T19:31:51
| null |
UTF-8
|
Python
| false
| false
| 4,405
|
py
|
"""Docker funtion in linux runtime
"""
import grp # pylint: disable=import-error
import io
import logging
import os
from treadmill import exc
from treadmill import fs
from treadmill import subproc
from treadmill import supervisor
from treadmill import utils
from treadmill import dockerutils
from treadmill.appcfg import abort as app_abort
from treadmill.fs import linux as fs_linux
from .. import _manifest
_LOGGER = logging.getLogger(__name__)
_CONTAINER_DOCKER_ENV_DIR = os.path.join('docker', 'env')
_CONTAINER_DOCKER_ETC_DIR = os.path.join('docker', 'etc')
_PASSWD_PATTERN = '{NAME}:x:{UID}:{GID}:{INFO}:{HOME}:{SHELL}'
_GROUP_PATTERN = '{NAME}:x:{GID}'
def _has_docker(app):
return hasattr(app, 'docker') and app.docker
def create_docker_environ_dir(container_dir, root_dir, app):
"""Creates environ dir for docker"""
if not _has_docker(app):
return
env_dir = os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR)
env = {}
treadmill_bind_preload_so = os.path.basename(
subproc.resolve('treadmill_bind_preload.so')
)
if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
env['LD_PRELOAD'] = os.path.join(
_manifest.TREADMILL_BIND_PATH,
'$LIB',
treadmill_bind_preload_so
)
supervisor.create_environ_dir(env_dir, env)
# Bind the environ directory in the container volume
fs.mkdir_safe(os.path.join(root_dir, _CONTAINER_DOCKER_ENV_DIR))
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ENV_DIR),
source=os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR),
recursive=False, read_only=True
)
def prepare_docker_daemon_path(newroot_norm, app, data):
"""Mount tmpfs for docker
"""
if not _has_docker(app):
return
    # Mount /etc/docker as tmpfs, since dockerd creates /etc/docker/key.json
try:
fs_linux.mount_tmpfs(newroot_norm, '/etc/docker')
except FileNotFoundError as err:
_LOGGER.error('Failed to mount docker tmpfs: %s', err)
# this exception is caught by sproc run to generate abort event
raise exc.ContainerSetupError(
msg=str(err),
reason=app_abort.AbortedReason.UNSUPPORTED,
)
# Setup the dockerd confdir
dockerutils.prepare_docker_confdir(
os.path.join(newroot_norm, 'etc', 'docker'),
app,
data
)
def overlay_docker(container_dir, root_dir, app):
"""Mount etc/hosts for docker container
"""
# FIXME: This path is mounted as RW because ro volume in treadmill
# container can not be mounted in docker 'Error response from
# daemon: chown /etc/hosts: read-only file system.'
if not _has_docker(app):
return
overlay_dir = os.path.join(container_dir, 'overlay')
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ETC_DIR, 'hosts'),
source=os.path.join(overlay_dir, 'etc/hosts'),
recursive=False, read_only=False
)
_create_overlay_passwd(root_dir, app.proid)
_create_overlay_group(root_dir, app.proid)
def _create_overlay_group(root_dir, proid):
"""create a overlay /etc/group in oder to mount into container
"""
path = os.path.join(root_dir, _CONTAINER_DOCKER_ETC_DIR, 'group')
(_uid, gid) = utils.get_uid_gid(proid)
with io.open(path, 'w') as f:
root = _GROUP_PATTERN.format(
NAME='root',
GID=0
)
f.write('{}\n'.format(root))
group = _GROUP_PATTERN.format(
NAME=grp.getgrgid(gid).gr_name,
GID=gid
)
f.write('{}\n'.format(group))
def _create_overlay_passwd(root_dir, proid):
"""create a overlay /etc/passwd in order to mount into container
"""
path = os.path.join(root_dir, _CONTAINER_DOCKER_ETC_DIR, 'passwd')
(uid, gid) = utils.get_uid_gid(proid)
with io.open(path, 'w') as f:
root = _PASSWD_PATTERN.format(
NAME='root',
UID=0,
GID=0,
INFO='root',
HOME='/root',
SHELL='/bin/sh'
)
f.write('{}\n'.format(root))
user = _PASSWD_PATTERN.format(
NAME=proid,
UID=uid,
GID=gid,
INFO='',
HOME='/',
SHELL='/sbin/nologin'
)
f.write('{}\n'.format(user))
|
[
"ceache@users.noreply.github.com"
] |
ceache@users.noreply.github.com
|
224805ac66be7bfd45da0592751b445c9731c9ef
|
61617c81e9be91abb6d19ac0205c7fc51bc73658
|
/decompose_matrix.py
|
50e46db26708dfee6b3a2bab01c229c9c8e22a7c
|
[
"MIT"
] |
permissive
|
vtlim/GLIC
|
2a5982352a2fc037d98ecfee1159ed3fa9f6c8e4
|
90e00e7030748c70ad284cda8785745b6c16ecbb
|
refs/heads/master
| 2020-06-13T13:05:33.913607
| 2020-02-04T02:07:58
| 2020-02-04T02:07:58
| 194,664,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
import sys
sys.path.insert(0, "/nethome/vlim/Desktop/Project/scripts/")
import transformations
import numpy as np
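# decompose_matrix() below is the API of the standalone transformations.py module
# (presumably Christoph Gohlke's); it splits a 4x4 homogeneous matrix into
# scale, shear, Euler angles, translation, and perspective components.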
m = [
    [0.6367010474205017, 0.7711036205291748, -0.0033135463017970324, 36.229270935058594],
    [0.7709313631057739, -0.6366397142410278, -0.018834583461284637, 104.86688995361328],
    [-0.01663294993340969, 0.009437481872737408, -0.9998171329498291, 191.3174285888672],
    [0.0, 0.0, 0.0, 1.0]]
print('\nThe input transformation matrix is:\n{}\n'.format(np.matrix(m)))
scale, shear, angles, translate, perspective = transformations.decompose_matrix(m)
print('scale:\n{}\n'.format(scale))
print('shear:\n{}\n'.format(shear))
print('angles:\n{}\n'.format(angles))
print('translate:\n{}\n'.format(translate))
print('perspective:\n{}\n'.format(perspective))
|
[
"lim.t.victoria@gmail.com"
] |
lim.t.victoria@gmail.com
|
58ed5ed1158721b68ff7e20e2c2c2fee82c7db8b
|
614950a965efeec17d9b7aa521ca701ba32f004c
|
/Prediction_Months.py
|
b86ff1d8971889dbfccca98ce0cc086defe63237
|
[] |
no_license
|
constantinirimia/Stock-Market-Prediction-using-Machine-Learning-Algorithms
|
a0d3100970eac529932fb8c0fc8e1f43ad2b59e4
|
959341161e9a8e9a27a51afd4fc9d12f358bf8e4
|
refs/heads/master
| 2022-04-12T22:37:49.475961
| 2020-04-02T19:17:19
| 2020-04-02T19:17:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,161
|
py
|
import pandas as pd
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn import preprocessing
from scipy import stats
import wx
from tkinter import *
from tkinter import messagebox
from sklearn.preprocessing import MinMaxScaler
plt.style.use('fivethirtyeight')
"""
First, let's create the monthly averages.
Each is computed by summing the Adj Close values and dividing by the number of days.
"""
# Calculate average of months
march = pd.read_csv('AAPL_March.csv')
april = pd.read_csv('AAPL_April.csv')
may = pd.read_csv('AAPL_May.csv')
june = pd.read_csv('AAPL_June.csv')
july = pd.read_csv('AAPL_July.csv')
august = pd.read_csv('AAPL_August.csv')
september = pd.read_csv('AAPL_September.csv')
october = pd.read_csv('AAPL_October.csv')
november = pd.read_csv('AAPL_November.csv')
december = pd.read_csv('AAPL_December.csv')
january = pd.read_csv('AAPL_January2020.csv')
february = pd.read_csv('AAPL_February2020.csv')
# -------------------------------
data = pd.read_csv('MonthlyAverage.csv')
df = np.array(data['Avg Close Price'])
def normalizeData(dataFrame):
myData = pd.read_csv('MonthlyAverage.csv')
df = np.array(myData['Avg Close Price'])
# Normalize the data using z-score
normalizedData = stats.zscore(df)
return normalizedData
'''
# --------------------------------------
'''
def calculateAverage(myData):
# Get all of the rows from the Close column
closePrice = np.array(myData['Adj Close'])
numOfDays = (int(len(closePrice)))
total = 0
for x in range(0, numOfDays):
total = total + closePrice[x]
average = total / numOfDays
return average
avgMarch = calculateAverage(march)
avgApril = calculateAverage(april)
avgMay = calculateAverage(may)
avgJune = calculateAverage(june)
avgJuly = calculateAverage(july)
avgAugust = calculateAverage(august)
avgSeptember = calculateAverage(september)
avgOctober = calculateAverage(october)
avgNovember = calculateAverage(november)
avgDecember = calculateAverage(december)
avgJanuary = calculateAverage(january)
avgFebruary = calculateAverage(february)
print(" -------------------------------------------------------------------------------")
print(" ")
print("Values of monthly averages: ")
print(" ")
print("March: ", avgMarch, " ", "April: ", avgApril, " ", "May: ", avgMay, " ")
print("June: ", avgJune, " ", "July: ", avgJuly, " ", "August: ", avgAugust, " ")
print("September: ", avgSeptember, " ", "October: ", avgOctober, " ", "November ", avgNovember, " ")
print("December: ", avgDecember, " ", "January: ", avgJanuary, " ", "February: ", avgFebruary)
print("--------------------------------------------------------------------------------")
print(" ")
'''
# This function supports the 13th-month up/down call:
# it computes the average over the monthly averages, and if the prediction
# is higher than that value the month goes Up, otherwise Down
'''
def predictionUpOrDown():
monthlyAverageData = pd.read_csv('MonthlyAverage.csv')
closePrice = np.array(monthlyAverageData['Avg Close Price'])
numMonths = (int(len(monthlyAverageData)))
avgY = 0
for x in range(0, numMonths):
avgY = avgY + closePrice[x]
averageYear = avgY / numMonths
return averageYear
print("Value of the whole year average: ")
print(" ")
a = predictionUpOrDown()
print(a)
print("--------------------------------------------------------------------------------")
print(" ")
###################
# Lets now plot the price history over the last 12 months
plt.figure(figsize=(8,6))
plt.title('Average Monthly Price of AAPL for the last 12 months')
plt.plot(data['Avg Close Price'])
plt.xlabel('Date in Months', fontsize=11)
plt.ylabel('Average Price', fontsize=11)
plt.show()
'''
# We now read the new file of monthly averages, build and fit the support vector machines,
# plot the results of each kernel, and compare them to see if the 13th month is going up or down
'''
monthlyAverage = pd.read_csv('MonthlyAverage.csv')
monthlyAverage.head(7)
# I want to create the x and y (date and average close price)
# Put them in lists for now
datesByMonth = []
pricesByMonth = []
# Get all of the data except for the last row
monthlyAveragePrice = monthlyAverage.head(len(monthlyAverage) - 1)
# print(monthlyAveragePrice.shape)
# Get all of the rows from the Date column
monthlyAverage_dates = monthlyAveragePrice.loc[:, 'Date']
# Get all of the rows from the Avg Close Price column
monthlyAverage_ClosePrice = monthlyAveragePrice.loc[:, 'Avg Close Price']
# Create the independent data set 'x' as dates
for date in monthlyAverage_dates:
# I have to separate it by months
datesByMonth.append([int(date.split('-')[1])])
# Create the dependent data set 'y' as close prices of the months
for open_price in monthlyAverage_ClosePrice:
pricesByMonth.append(float(open_price))
# Print the dates by months
print("The months that we are getting the data from are: ", datesByMonth)
print(" ")
# Create a variable named forecast which will be our prediction.
# It will be set up to 1 (predict one month in advance)
forecast = 1
'''
# Now we will create 3 functions to make predictions using 3 different support vector regression models
# with 3 different kernels: radial basis function, linear and polynomial
'''
def predictAveragePriceRBF(date, averagePrice, forecast):
# Create Support Vector Regression Model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
# Train the model on the dates and average prices
svr_rbf.fit(date, averagePrice)
# Plot the model on a graph to see which has the best fit
plt.scatter(date, averagePrice, color='black', label='Data')
plt.plot(date, svr_rbf.predict(date), color='red', label='RBF model')
plt.xlabel('Date by Months')
plt.ylabel('Average Price by Months')
plt.title('Support Vector Machine - SVM')
plt.legend()
plt.show()
# return the model prediction
return svr_rbf.predict(forecast)[0]
def predictAveragePriceRegression(date, price, forecast):
quadratic_Regression = make_pipeline(PolynomialFeatures(2), Ridge())
quadratic_Regression.fit(date, price)
# Plot the model on a graph to see which has the best fit
plt.scatter(date, price, color='black', label='Data')
plt.plot(date, quadratic_Regression.predict(date), color='yellow', label='Regression model')
plt.xlabel('Date by Months')
plt.ylabel('Average Price by Months')
plt.title('Quadratic regression Model')
plt.legend()
plt.show()
return quadratic_Regression.predict(forecast)
def getRegressionAccuracy():
normalized = pd.read_csv('NormalizedData.csv')
months = normalized[['Avg Close Price']]
months['Prediction'] = months[['Avg Close Price']].shift(-forecast)
# Create train set as the price per month
    train = np.array(months.drop(['Prediction'], axis=1))
train = train[:-forecast]
# Create test set as the column prediction
test = np.array(months['Prediction'])
test = test[:-forecast]
# Split the data
# 80% training and 20% test
X_train, X_test, y_train, y_test = train_test_split(train,
test, test_size=0.2, random_state=109)
quadratic_Regression = make_pipeline(PolynomialFeatures(2), Ridge())
quadratic_Regression.fit(X_train, y_train)
# Printing the results as the confidence level
return quadratic_Regression.score(X_test, y_test)
# SVM - kernel poly
def predictAveragePricePoly(date, averagePrice, forecast):
# Create Support Vector Regression Model
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
# Train the model on the dates and average prices
svr_poly.fit(date, averagePrice)
# Plot the model on a graph to see which has the best fit
plt.scatter(date, averagePrice, color='black', label='Data')
plt.plot(date, svr_poly.predict(date), color='blue', label='Polynomial model')
plt.xlabel('Date by Months')
plt.ylabel('Average Price by Months')
plt.title('Support Vector Machine - SVM')
plt.legend()
plt.show()
# return the model prediction
return svr_poly.predict(forecast)[0]
# SVM linear
def predictAveragePriceLinear(date, averagePrice, forecast):
# Create Support Vector Regression Model
svr_lin = SVR(kernel='linear', C=1e3)
# Train the model on the dates and average prices
svr_lin.fit(date, averagePrice)
# Plot the model on a graph to see which has the best fit
plt.scatter(date, averagePrice, color='black', label='Data')
plt.plot(date, svr_lin.predict(date), color='green', label='Linear model')
plt.xlabel('Date by Months')
plt.ylabel('Average Price by Months')
plt.title('Support Vector Machine - SVM')
plt.legend()
plt.show()
# return the model prediction
return svr_lin.predict(forecast)[0]
predicted_priceRBF = predictAveragePriceRBF(datesByMonth, pricesByMonth, [[13]])
predicted_priceLinear = predictAveragePriceLinear(datesByMonth, pricesByMonth, [[13]])
predicted_pricePoly = predictAveragePricePoly(datesByMonth, pricesByMonth, [[13]])
'''
# Creating the SVM to get the accuracy of the models using different kernels
'''
# ------ Get the LINEAR model accuracy ---------------------------------------------
def getAccuracyLINEAR():
normalized = pd.read_csv('NormalizedData.csv')
months = normalized[['Avg Close Price']]
months['Prediction'] = months[['Avg Close Price']].shift(-forecast)
# Create train set as the price per month
    train = np.array(months.drop(['Prediction'], axis=1))
train = train[:-forecast]
# Create test set as the column prediction
test = np.array(months['Prediction'])
test = test[:-forecast]
# Split the data
# 80% training and 20% test
X_train, X_test, y_train, y_test = train_test_split(train,
test, test_size=0.2, random_state=109)
lin = SVR(kernel='linear', C=1e3)
lin.fit(X_train, y_train)
return lin.score(X_test, y_test)
# ------ Get the RBF model accuracy ---------------------------------------------
def getAccuracyRBF():
normalized = pd.read_csv('NormalizedData.csv')
months = normalized[['Avg Close Price']]
months['Prediction'] = months[['Avg Close Price']].shift(-forecast)
# Create train set as the price per month
    train = np.array(months.drop(['Prediction'], axis=1))
train = train[:-forecast]
# Create test set as the column prediction
test = np.array(months['Prediction'])
test = test[:-forecast]
# Split the data
# 80% training and 20% test
X_train, X_test, y_train, y_test = train_test_split(train,
test, test_size=0.2, random_state=109)
rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
rbf.fit(X_train, y_train)
return rbf.score(X_test, y_test)
# ------ Get the POLYNOMIAL model accuracy ---------------------------------------------
def getAccuracyPOLY():
months = monthlyAverage[['Avg Close Price']]
months['Prediction'] = months[['Avg Close Price']].shift(-forecast)
# Create train set as the price per month
    train = np.array(months.drop(['Prediction'], axis=1))
train = train[:-forecast]
# Create test set as the column prediction
test = np.array(months['Prediction'])
test = test[:-forecast]
# Split the data
# 80% training and 20% test
X_train, X_test, y_train, y_test = train_test_split(train,
test, test_size=0.2, random_state=109)
poly = SVR(kernel='poly', C=1e3, degree=2)
poly.fit(X_train, y_train)
return poly.score(X_test, y_test)
'''
Function to implement the KNN algorithm from the scikit-learn library and train it on our data.
It also plots the results separately and all together to see the differences between the models.
'''
def makePredKNN(date, averagePrice, forecast):
# Create the KNN
k_NN = KNeighborsRegressor(n_neighbors=3)
k_NN.fit(date, averagePrice)
price = k_NN.predict(forecast)
# Plot the model on a graph to see which has the best fit
plt.scatter(date, averagePrice, color='black', label='Data')
plt.plot(date, k_NN.predict(date), color='purple', label='K-NN Model')
plt.xlabel('Date by Months')
plt.ylabel('Average Price by Months')
plt.title('K - Nearest Neighbour')
plt.legend()
plt.show()
# return the model prediction
return price
# Function to get the KNN model accuracy
def getAccuracyKNN():
normalized = pd.read_csv('NormalizedData.csv')
months = normalized[['Avg Close Price']]
months['Prediction'] = months[['Avg Close Price']].shift(-forecast)
# Create train set as the price per month
    train = np.array(months.drop(['Prediction'], axis=1))
train = train[:-forecast]
# Create test set as the column prediction
test = np.array(months['Prediction'])
test = test[:-forecast]
# Split the data
# 80% training and 20% test
X_train, X_test, y_train, y_test = train_test_split(train,
test, test_size=0.2, random_state=109)
k_NN = KNeighborsRegressor(n_neighbors=3)
k_NN.fit(X_train, y_train)
return k_NN.score(X_test, y_test)
predicted_PriceKNN = makePredKNN(datesByMonth, pricesByMonth, [[13]])
predicted_PriceRegression = predictAveragePriceRegression(datesByMonth, pricesByMonth, [[13]])
print("--------------------------------------------------------------------------")
print(" ")
print("The predicted price of the next month is: ")
print("")
print("---> using RBF kernel: $", predicted_priceRBF)
print("-> the model has a accuracy of: ",
round((getAccuracyRBF() * 100), 3), "%")
print("")
print("---> using Polynomial kernel: ", predicted_pricePoly)
print("-> the model has a accuracy of: ",
round((getAccuracyPOLY() * 100), 3), "%")
print("")
print("---> using Linear kernel: ", predicted_priceLinear)
print("-> the model has a accuracy of: ",
round((getAccuracyLINEAR() * 100), 3), "%")
print(" ")
print("---> using KNN model: ", predicted_PriceKNN)
print("-> the model has a accuracy of: ",
round((getAccuracyKNN() * 100), 3), "%")
print("")
print("---> using Regression model : ", predicted_priceLinear)
print("-> the model has a accuracy of: ",
round((getRegressionAccuracy() * 100), 3), "%")
print("--------------------------------------------------------------------------")
def bestAccuracy():
rbf = getAccuracyRBF()
lin = getAccuracyLINEAR()
poly = getAccuracyPOLY()
knn = getAccuracyKNN()
reg = getRegressionAccuracy()
maxList = [rbf, lin, poly, knn, reg]
return max(maxList)
# Function to get the best price (given by the model with the best accuracy)
def getBestPrice():
rbf = getAccuracyRBF()
lin = getAccuracyLINEAR()
poly = getAccuracyPOLY()
knn = getAccuracyKNN()
reg = getRegressionAccuracy()
accuracies = {rbf: predicted_priceRBF,
lin: predicted_priceLinear,
poly: predicted_pricePoly,
knn: predicted_PriceKNN,
reg: predicted_PriceRegression
}
if bestAccuracy() == rbf:
return accuracies[rbf]
elif bestAccuracy() == lin:
return accuracies[lin]
elif bestAccuracy() == knn:
return accuracies[knn]
elif bestAccuracy() == reg:
return accuracies[reg]
    else:
        return accuracies[poly]
def getPrice2Decimals(bestPrice):
return "{:0.2f}".format(bestPrice)
print(getPrice2Decimals(getBestPrice()))
# Function to predict whether the month is going up or down
def makePred(pricePredicted, yearAverage):
if getBestPrice() < predictionUpOrDown():
print("The price of the next month will be: $", getBestPrice())
print("")
print("The predicted month will go DOWN. ")
print("You should NOT buy the stock now")
elif getBestPrice() > predictionUpOrDown():
print("The price of the next month will be: ", getBestPrice())
print("")
print("The predicted month will go UP. ")
print("You SHOULD buy the stock now")
else:
print("The stock price will keep the same value")
print(" ")
def plotAllModels(date, averagePrice, forecast):
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
svr_poly.fit(date, averagePrice)
svr_lin = SVR(kernel='linear', C=1e3)
svr_lin.fit(date, averagePrice)
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_rbf.fit(date, averagePrice)
k_NN = KNeighborsRegressor(n_neighbors=3)
k_NN.fit(date, averagePrice)
quadratic_Regression = make_pipeline(PolynomialFeatures(2), Ridge())
quadratic_Regression.fit(date, averagePrice)
# Plot the model on a graph to see which has the best fit
plt.scatter(date, averagePrice, color='black', label='Data')
plt.plot(date, svr_lin.predict(date), color='green', label='Linear model')
plt.plot(date, svr_poly.predict(date), color='blue', label='Polynomial model')
plt.plot(date, svr_rbf.predict(date), color='red', label='RBF model')
plt.plot(date, k_NN.predict(date), color='purple', label='KNN model')
#plt.plot(date, quadratic_Regression.predict(date), color='yellow', label="Regression model")
plt.xlabel('Date by Months')
plt.ylabel('Average Price by Months')
plt.title('PREDICTION MODELS')
plt.legend()
plt.show()
print("")
print("My Predictions for the next month price stock are: ")
print(" ")
print(makePred(getBestPrice(), a))
print("")
print("--------------------------------------------------------------------------")
print(" ")
plotAllModels(datesByMonth, pricesByMonth, 13)
|
[
"cirimia49@gmail.com"
] |
cirimia49@gmail.com
|
fc9f01887c4a6b276e93e9c6fd48ae39dd9e98b0
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R2/benchmark/startQiskit_Class66.py
|
de1dd2f58ce2a9d58b5a7c4f8933d6310d93f36a
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,442
|
py
|
# qubit number=3
# total number=11
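# This appears to be an auto-mutated Bernstein-Vazirani circuit (the "number=" tags
# seem to index the injected/rewritten gates); results are read from a statevector backend.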
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.x(input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.cx(input_qubit[2],input_qubit[1]) # number=4
prog.cx(input_qubit[2],input_qubit[1]) # number=10
prog.z(input_qubit[2]) # number=3
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_Class66.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
41ae5d87df532d2ee47b6a435c5e70a35ee8c637
|
f8295c4b18d76d2c4467de8351802c2c741f06d9
|
/example_project/example_project/urls.py
|
f8969c615b427f40f3029ac2d2ff64d5671751db
|
[] |
no_license
|
stefanw/django-wikidata
|
993b95ea7060c22a1c7ba4cdb46f3fbeb9338aca
|
41b41650a1f5b893a7aa7855864a9a05e8e5d372
|
refs/heads/master
| 2020-04-24T20:25:31.897429
| 2019-02-24T09:38:52
| 2019-02-24T09:38:52
| 172,243,029
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
"""example_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
667cc5de2a100c1fe23d3a5cb63b414d78c160e0
|
b8d24161d9935e7eb148d9fbbf7bdf6bde9a57f4
|
/NumGuessGame.py
|
59c42714fcd2c7911f05c018410af472cf97682a
|
[] |
no_license
|
Rahul-Kumar-Tiwari/Hactoberfest2020-1
|
9881f30bb2a90d368f29d2c1d648554c7401805a
|
0dff2c8daba4340732ef023399510303a38ef16d
|
refs/heads/main
| 2023-08-26T05:15:23.354071
| 2020-11-09T08:08:23
| 2020-11-09T08:08:23
| 417,149,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
# Guess_a_number
#Script of a game that asks the player to guess the number in a specific number of attempts.
import random
print("What is your name?") #Asking player to enter name.
name = str(input()) #Taking input from player.
print("Hey " + name + ",I was thinking of a number between 1 to 20.Can you guess it???")
secret_number = random.randint(1, 20) #Generating a random number for player to guess.
#Taking input of guess from player
for guess_taken in range(1, 7):
if guess_taken == 1:
print("Guess the number ")
elif guess_taken == 6:
print("This is your last chance.")
else:
print("Give another try")
guess = int(input())
if guess > secret_number:
print("Your guess is too high.")
elif guess < secret_number:
print("Your guess is too low.")
else:
break #This condition is for correct guess!
if guess == secret_number:
if guess_taken == 1:
print("Amazing!!! You guessed the number in just one guess.")
else:
print("Good job " + name + "! You guessed my number in " + str(guess_taken) + " guesses.") #Executes when player guess the number.
else:
print("Nope.The number I was thinking of was " + str(secret_number) + ".Better luck next time!!!") #Executes when player fails to guess the number in given chances.
|
[
"noreply@github.com"
] |
noreply@github.com
|
527d383097cc6bb45bd1a5ade545ad9dcf8a4402
|
f7715ace0306d1a9650c0c8470791e56eaa4eb43
|
/telemetrymonitor.py
|
754bdc9c16589a1fa9e6b8d9b96c315349ea1d5f
|
[] |
no_license
|
cbott/MHRtelemetry
|
2898ae7b2fc88571113ed8f4886a921a404aa701
|
6f332621bcc80ca642f7e03e6828d55ab2926c39
|
refs/heads/master
| 2021-01-10T22:59:05.447082
| 2017-05-07T20:45:58
| 2017-05-07T20:47:31
| 70,435,459
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,322
|
py
|
#!/usr/bin/env python
#grapharduino.py
from arduinoserial import Arduino
from liveplots import *
import sys
from time import strftime
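# Reads whitespace-separated telemetry packets from an Arduino serial link,
# appends them to a timestamped log, and refreshes the live dashboard widgets.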
def basic_graph(arduino, log):
SIZE = 1 #Expected packet length
liveplot_init(2,1, "Basic Telemetry Data")
speedometer = Dial(row=0, col=0, title="Car Speed (MPH)", ymin=0, ymax=90, ticks=10)
readout = Text(row=1, col=0, title="")
topspeed = -100
    data = [0] * SIZE
while 1:
for val in arduino.read():
try:
vals = val.split()
                data = list(map(float, vals))  # list() so len(data) works on Python 3
                print(data)
            except (AttributeError, ValueError):
                print("Received unexpected data [", val, "]")
if len(data) == SIZE:
log.write(strftime("[%H:%M:%S] ") + str(data) + "\n")
mph = data[0]
speedometer.update(mph)
topspeed = max(topspeed, mph)
readout.update("Current Speed: "+str(mph)+" MPH\nTop Speed: "+str(topspeed)+" MPH")
liveplot_update(0.1)
def car_monitor(arduino, log):
####### Data Format
## Byte array
#0# [Engine Temp] 0-255
#1# [Left EV Battery Temp] 0-255
#2# [Right EV Battery Temp] 0-255
#3# [Left Accumulator SOC] 0-100
#4# [Right Accumulator SOC] 0-100
#5# [RPM] 0-150 x100
#6# [Left In-Hub Motor Temp] 0-255
#7# [Right In-Hub Motor Temp] 0-255
#8# [Left Motor Controller Temp] 0-255
#9# [Right Motor Controller Temp] 0-255
#10# [Accel Pedal Percentage] 0-100
#11# [Accel Pedal Error] boolean
#12# [Brake Pedal Percentage] 0-100
#13# [Brake Pedal Error] boolean
#14# [Low Voltage Battery SOC] 0-120 /10
SIZE = 15
# Create the window
liveplot_init(3,5, "MHR17 Telemetry System")
# Add widgets
engine_t = ScrollingLinePlot(row=0, col=3, title="Engine Temp.", ymin=0, ymax=255, width=25, ylabel="deg F")
l_bat_t = ScrollingLinePlot(row=1, col=0, title="Left Battery Temp.", ymin=0, ymax=255, width=25, ylabel="deg F")
r_bat_t = ScrollingLinePlot(row=2, col=0, title="Right Battery Temp.", ymin=0, ymax=255, width=25, ylabel="deg F")
l_bat_soc = BarChart(row=0, col=0, title="Left Accumulator SOC", ymin=0, ymax=100, ylabel="%")
r_bat_soc = BarChart(row=0, col=1, title="Right Accumulator SOC", ymin=0, ymax=100, ylabel="%")
lv_soc = BarChart(row=0, col=2, title="LV Battery Voltage", ymin=0, ymax=13, ylabel="V")
rpm = Dial(row=0, col=4, title="Engine RPM (x1000)", ymin=0, ymax=15, ticks=16)
l_motor_t = ScrollingLinePlot(row=1, col=1, title="Left In-Hub Temp.", ymin=0, ymax=255, width=25, ylabel="deg F")
r_motor_t = ScrollingLinePlot(row=2, col=1, title="Right In-Hub Temp.", ymin=0, ymax=255, width=25, ylabel="deg F")
l_mc_t = ScrollingLinePlot(row=1, col=2, title="Left Motor Controller Temp.", ymin=0, ymax=255, width=25, ylabel="deg F")
r_mc_t = ScrollingLinePlot(row=2, col=2, title="Right Motor Controller Temp.", ymin=0, ymax=255, width=25, ylabel="deg F")
accel = BarChart(row=1, col=3, title="Accel Pedal %", ymin=0, ymax=100, ylabel="%")
accel_err = BarChart(row=1, col=4, title="Accel Pedal Error", ymin=0, ymax=1, ylabel="", show_axes=False)
brake = BarChart(row=2, col=3, title="Brake Pedal %", ymin=0, ymax=100, ylabel="%")
brake_err = BarChart(row=2, col=4, title="Brake Pedal Error", ymin=0, ymax=1, ylabel="", show_axes=False)
# Mainloop
    data = [0] * SIZE
while 1:
# Read in all messages from serial buffer
for val in arduino.read():
try:
vals = val.split()
                data = list(map(int, vals))  # list() so len(data) works on Python 3
                print(data)
            except (AttributeError, ValueError):
                print("Received unexpected data [", val, "]")
if len(data) == SIZE:
# Update widgets with new data
log.write(strftime("[%H:%M:%S] ") + str(data) + "\n")
engine_t.update(data[0])
l_bat_t.update(data[1])
r_bat_t.update(data[2])
l_bat_soc.update(data[3])
r_bat_soc.update(data[4])
rpm.update(data[5]/10.0)
l_motor_t.update(data[6])
r_motor_t.update(data[7])
l_mc_t.update(data[8])
r_mc_t.update(data[9])
accel.update(data[10])
accel_err.update(1,color = 'r' if data[11] else 'g')
brake.update(data[12])
brake_err.update(1,color = 'r' if data[13] else 'g')
lv_soc.update(data[14]/10.0)
# Refresh the window
liveplot_update(0.1)
if __name__ == "__main__":
serial_port = ""
log_file = "Log.txt"
if len(sys.argv) == 2:
serial_port = sys.argv[1]
elif len(sys.argv) == 3:
serial_port = sys.argv[1]
log_file = sys.argv[2]
else:
print("Incorrect Argument Format\nUsage: python grapharduino.py serial_port [log file]")
sys.exit()
arduino = Arduino(serial_port)
log = open(log_file, 'a')
log.write(strftime(">> BEGIN LOG << %m/%d/%y at %I:%M %p\n"))
try:
car_monitor(arduino, log)
except (Exception, KeyboardInterrupt) as e:
print(e)
finally:
#run cleanup procedures when application closes
arduino.close()
log.close()
|
[
"cbott6@gmail.com"
] |
cbott6@gmail.com
|
4a44f533cf74b906f0764a0071db7ed995fe230f
|
8677fcc71a14b53eee1853e477f0821efaa7bda2
|
/2017/day03/tests.py
|
ef8fa3e4acb6e164db910f7d987c910307bc0f6c
|
[] |
no_license
|
corentingi/adventofcode
|
0c30099175c1de0d84ce48d0dc47935fa2aea7d5
|
193a591a2f5313e602751d2051c6004794ab0882
|
refs/heads/master
| 2022-03-18T20:20:28.710395
| 2019-12-03T09:37:41
| 2019-12-03T09:37:41
| 112,927,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
#!/usr/bin/env python3
# coding: utf8
import unittest
from script import manhattan_steps_part1, manhattan_steps_part2
cases_part1 = [
(1, 0),
(12, 3),
(23, 2),
(1024, 31),
]
class TestScript(unittest.TestCase):
def test_manhattan_steps_part1(self):
for input_value, expected in cases_part1:
result = manhattan_steps_part1(input_value)
self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()
|
[
"corentin.gitton@gmail.com"
] |
corentin.gitton@gmail.com
|
78b7438d65e518367530ce1ce4adeed283a97e9a
|
002ee33a04a6a74c10be79a2d667871de90fe728
|
/faq/views.py
|
4c8191dd9394c62569a71e75a3d988cd4a34e227
|
[] |
no_license
|
Code-Institute-Submissions/final-milestone-eCommerce
|
dc5866c61acd31bbf59ed31168e3e8110262a737
|
d1547f90dc26ca20be299b98966865ef88df0027
|
refs/heads/master
| 2022-11-26T00:27:32.014852
| 2020-08-07T14:44:55
| 2020-08-07T14:44:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
from django.shortcuts import render
from .models import FAQ
def show_faqs(request):
"""Renders all of the current frequently asked questions
to the faq.html page
"""
faqs = FAQ.objects.all()
return render(request, 'faq/faq.html', {'faqs': faqs})
|
[
"adrian.havenga@yahoo.com"
] |
adrian.havenga@yahoo.com
|
c16d7fead97b6cc83da3eef62159a58da37d2b06
|
f04b275d6c08ce4725a0d2737a5889616e33af86
|
/isedc/sedc_target2_time.py
|
f84c9208ba613161b092aece652f9105806d849a
|
[] |
no_license
|
ADMAntwerp/ImageCounterfactualExplanations
|
aef53e197a7cc49cf6418ca1f62a3ebc9d098a40
|
1c8bd4f66433427fe1b9de3795131f39888793fa
|
refs/heads/main
| 2023-08-14T23:55:17.379401
| 2021-09-15T13:04:33
| 2021-09-15T13:04:33
| 404,261,211
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,571
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 13:56:26 2020
@author: TVermeire
"""
def sedc_target2_time(image, classifier, segments, target_class, mode, time_limit=15):
import time
import numpy as np
import cv2
start = time.time()
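    # SEDC-style counterfactual search: greedily grow sets of segments whose
    # replacement (mean / blur / random / inpaint) pushes the classifier toward
    # target_class, stopping once it flips or time_limit seconds elapse.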
result = classifier.predict(image[np.newaxis,...])
c = np.argmax(result)
p = result[0,target_class]
R = [] #list of explanations
I = [] #corresponding perturbed images
C = [] #corresponding new classes
P = [] #corresponding scores for original class
too_long = False
original_score = False
sets_to_expand_on = []
P_sets_to_expand_on = np.array([])
if mode == 'mean':
perturbed_image = np.zeros((224,224,3))
perturbed_image[:,:,0] = np.mean(image[:,:,0])
perturbed_image[:,:,1] = np.mean(image[:,:,1])
perturbed_image[:,:,2] = np.mean(image[:,:,2])
elif mode == 'blur':
perturbed_image = cv2.GaussianBlur(image, (31,31), 0)
elif mode == 'random':
perturbed_image = np.random.random((224,224,3))
elif mode == 'inpaint':
perturbed_image = np.zeros((224,224,3))
for j in np.unique(segments):
image_absolute = (image*255).astype('uint8')
mask = np.full([image_absolute.shape[0],image_absolute.shape[1]],0)
mask[segments == j] = 255
mask = mask.astype('uint8')
image_segment_inpainted = cv2.inpaint(image_absolute, mask, 3, cv2.INPAINT_NS)
perturbed_image[segments == j] = image_segment_inpainted[segments == j]/255.0
for j in np.unique(segments):
test_image = image.copy()
test_image[segments == j] = perturbed_image[segments == j]
result = classifier.predict(test_image[np.newaxis,...])
c_new = np.argmax(result)
p_new = result[0,target_class]
if c_new == target_class:
R.append([j])
I.append(test_image)
C.append(c_new)
P.append(p_new)
else:
sets_to_expand_on.append([j])
P_sets_to_expand_on = np.append(P_sets_to_expand_on,p_new - result[0,c])
cs = j
while len(R) == 0:
if (time.time() - start) > time_limit:
# To create output for experiment (very dirty)
too_long = True
explanation = False
perturbation = test_image
segments_in_explanation = cs
target_score = p_new
new_class = c_new
original_score = result[0,c]
break
combo = np.argmax(P_sets_to_expand_on)
combo_set = []
for j in np.unique(segments):
if j not in sets_to_expand_on[combo]:
combo_set.append(np.append(sets_to_expand_on[combo],j))
# Make sure to not go back to previous node
del sets_to_expand_on[combo]
P_sets_to_expand_on = np.delete(P_sets_to_expand_on,combo)
for cs in combo_set:
test_image = image.copy()
for k in cs:
test_image[segments == k] = perturbed_image[segments == k]
result = classifier.predict(test_image[np.newaxis,...])
c_new = np.argmax(result)
p_new = result[0,target_class]
if c_new == target_class:
R.append(cs)
I.append(test_image)
C.append(c_new)
P.append(p_new)
else:
sets_to_expand_on.append(cs)
P_sets_to_expand_on = np.append(P_sets_to_expand_on,p_new - result[0,c])
    if not too_long:
        # Select best explanation: highest target score increase
        best_explanation = np.argmax(np.array(P) - p)  # explicit elementwise subtraction over the list P
segments_in_explanation = R[best_explanation]
explanation = np.full([image.shape[0],image.shape[1],image.shape[2]],0/255.0)
for i in R[best_explanation]:
explanation[segments == i] = image[segments == i]
perturbation = I[best_explanation]
new_class = C[best_explanation]
target_score = P[best_explanation]
else:
print('No explanation found within time limit of ' + str(time_limit) + ' seconds.')
return explanation, segments_in_explanation, perturbation, new_class, original_score, target_score, too_long
|
[
"mazzine.r@gmail.com"
] |
mazzine.r@gmail.com
|
67a7dffac9b09ccf1b48ad50fcbdc6d4a116e329
|
a0568cddfdd59f85117455557ca9f46b63db85bc
|
/ClassDemo/yieldDemo.py
|
f1f80d6f390d9386e452bc598fe8155f21365e45
|
[] |
no_license
|
wuud/python_demo
|
ef5e27de9850b7e95f77625667266e327f3fd8c2
|
9fced6907b38c736f65288de7c2006ff8615fab0
|
refs/heads/master
| 2020-07-28T05:53:31.644819
| 2019-09-18T14:32:29
| 2019-09-18T14:32:29
| 209,329,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
'''
Once a function contains yield, it becomes a generator:
calling it does not run the body immediately. The body only executes up to the
matching yield when .__next__() or .send() is called, and each further call to
next resumes execution up to the next yield.
'''
def f():
    print("my demo")
    m = yield 1  # yield's value is what n.__next__() returns; m stays None here, only send() would assign to m
    print(m)
    print("t")
    n = yield 2
n = f()
n.__next__()
n.__next__()
#n.send("run")
|
[
"wuu_dd@qq.com"
] |
wuu_dd@qq.com
|
8802f66220d335724d20ca7fe5fd5168ee00ba1d
|
d91f23534d9af0128011d38132dbefd8b508f1c1
|
/kensho/parse_trendreverse.py
|
9e8a180dda24d34646cbea7783dc3a03e87bc284
|
[] |
no_license
|
tomoyanp/oanda_dev
|
06bd904cd0d60e072a0627a81b1db384e1e292a5
|
c35da11f30e6b4160d16f768ce5d8d0714c2b55d
|
refs/heads/master
| 2021-01-10T22:55:51.639812
| 2018-08-21T01:03:27
| 2018-08-21T01:03:27
| 70,463,447
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
import re
import subprocess as commands  # Python 3: subprocess.getoutput replaces the removed commands module
from datetime import datetime
import time
file_list = commands.getoutput("ls *.result")
file_list = file_list.split("\n")
import sys
filename = sys.argv[1].strip()
write_file = open("%s.parse" % filename, "a")
write_file.write("# %s\n" % filename)
cmd = "cat %s | grep Algorithm" % filename
out = commands.getoutput(cmd)
out = out.split("\n")
algo_list = out
cmd = "cat %s | grep \"EXECUTE ORDER\"" % filename
out = commands.getoutput(cmd)
out = out.split("\n")
order_list = out
cmd = "cat %s | grep \"EXECUTE SETTLE\"" % filename
out = commands.getoutput(cmd)
out = out.split("\n")
settle_list = out
cmd = "cat %s | grep PROFIT | grep -v STL" % filename
out = commands.getoutput(cmd)
out = out.split("\n")
profit_list = out
cmd = "cat %s | grep TRADE_FLAG" % filename
out = commands.getoutput(cmd)
out = out.split("\n")
flag_list = out
cmd = "cat %s | grep upper_sigma" % filename
out = commands.getoutput(cmd)
out = out.split("\n")
upper_sigma_list = []
for elm in out:
upper_sigma_list.append(elm.split("=")[1].strip())
print(upper_sigma_list)
cmd = "cat %s | grep lower_sigma" % filename
out = commands.getoutput(cmd)
out = out.split("\n")
lower_sigma_list = []
for elm in out:
lower_sigma_list.append(elm.split("=")[1].strip())
print(lower_sigma_list)
for i in range(0, len(profit_list)):
algo = algo_list[i].split(" ")[2]
order_time = order_list[i].split(" ")[4] + " " + order_list[i].split(" ")[5]
profit = profit_list[i].split(" ")[1].split("=")[1]
side = flag_list[i].split(" ")[2].split("=")[1]
settle_time = settle_list[i].split(" ")[4] + " " + settle_list[i].split(" ")[5]
order_ptime = datetime.strptime(order_time, "%Y-%m-%d %H:%M:%S")
settle_ptime = datetime.strptime(settle_time, "%Y-%m-%d %H:%M:%S")
difference_time = settle_ptime - order_ptime
start_time = datetime.strptime(order_time, "%Y-%m-%d %H:%M:%S")
end_time = datetime.strptime(settle_time, "%Y-%m-%d %H:%M:%S")
end_time = time.mktime(end_time.timetuple())
start_time = time.mktime(start_time.timetuple())
result = end_time - start_time
result = datetime.fromtimestamp(result)
days = result.day-1
hour = result.hour
difference_sigma = float(upper_sigma_list[i]) - float(lower_sigma_list[i])
    print(order_time + "," + settle_time + "," + str(difference_time.total_seconds()) + "," + algo + "," + side + "," + profit + "," + str(difference_sigma))
    # print(algo_list[i].split(" ")[2], profit_list[i].split(" ")[2])
write_file.close()
|
[
"tomoyanpy@gmail.com"
] |
tomoyanpy@gmail.com
|
34f9ba79d9650e21aabfe30c2947e73c3c635e7b
|
50f44650c2229eef5b3e77e097b71c7b064e9663
|
/Max/src/TestMax.py
|
3660f5d795b6db9d0304f2113bf43ef71c2d552a
|
[] |
no_license
|
gonzeD/CS1-Python
|
b0821cc0c5755f796548363074ac2f48df47e544
|
cb9a661a43652c6d3fd59c0723b8c764c7d28ff7
|
refs/heads/master
| 2020-03-22T03:37:17.637568
| 2018-07-05T21:10:58
| 2018-07-05T21:10:58
| 139,442,216
| 0
| 1
| null | 2018-07-04T07:47:56
| 2018-07-02T12:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 913
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import unittest
import random
import CorrMax as corr
import max
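# _ is presumably installed into builtins by the grading framework (gettext-style
# translation); it is not defined or imported in this file.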
class TestMax(unittest.TestCase):
def test_exist(self):
self.assertEqual(True, hasattr(max, 'maximum'), _("You did not name the method as expected."))
def test_max(self):
lists = [[random.randint(-100, 100) for _ in range(random.randint(1, 20))] for _ in range(random.randint(1, 20))]
ans = _("The maximum of {} is {} and you returned {}.")
for i in range(len(lists)):
stu_ans = max.maximum(lists[i])
corr_ans = corr.maximum(lists[i])
self.assertEqual(corr_ans, stu_ans, ans.format(lists[i], corr_ans, stu_ans))
def test_empty(self):
lst = []
ans = _("When the list is empty you should return None.")
self.assertEqual(None, max.maximum(lst), ans)
if __name__ == '__main__':
unittest.main()
|
[
"dtanguy@localhost.worktdb"
] |
dtanguy@localhost.worktdb
|
d266cb49ad369541c1fbda4f0182851a05f6e423
|
2e75e1a935461c0fa80b59c496c1553f44c4cf49
|
/venv/Scripts/pip3-script.py
|
5f20486dcba63eacf2d976d8d3fffe41e54e2443
|
[] |
no_license
|
zzz36617004/bigdata-test2
|
8e65a23023eb206000c72e41658641117fa2b958
|
142c89b532ac50f5c6eca41bdc9fe910af081085
|
refs/heads/master
| 2020-03-18T21:15:08.677554
| 2018-05-29T09:03:27
| 2018-05-29T09:03:27
| 135,269,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
#!D:\javaStudy\pycharmproject\python-fb\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
|
[
"zzz36617004@naver.com"
] |
zzz36617004@naver.com
|
3adcd6a7bfde73ec5edccbb99921595eded1c389
|
87ed923199e8c49f32510fcf73f3b72a382ed183
|
/plastron-plate/platron-stub.py
|
1ee3d0556dcf022d728c787269ee6f488d632d25
|
[] |
no_license
|
semicontinuity/hardware
|
775306cbef12d26b27b96712e869536a8d521954
|
74a57f1199e6cf2c9dc0387007fa57cfde9c6751
|
refs/heads/master
| 2023-08-08T12:24:09.049435
| 2023-07-29T08:59:40
| 2023-07-29T08:59:40
| 4,886,190
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
from gcode import *
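# Plate outline in mm: WIDTH x HEIGHT, THICK deep; RM looks like an extra
# clearance margin milled around the outline.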
WIDTH = 249.6
HEIGHT = 47.6
THICK = 8
RM = 1.5
def milling_program(gcode):
return gcode.program(
gcode.with_spindle(
'E',
gcode.deep_line(-WIDTH/2 - RM, -HEIGHT/2 - RM, -WIDTH/2 - RM, +HEIGHT/2 + RM),
'W',
gcode.deep_line(+WIDTH/2 + RM, -HEIGHT/2 - RM, +WIDTH/2 + RM, +HEIGHT/2 + RM),
'N',
gcode.deep_line(-WIDTH/2 - RM, +HEIGHT/2 + RM, +WIDTH/2 + RM, +HEIGHT/2 + RM),
'S',
gcode.deep_line(-WIDTH/2 - RM, -HEIGHT/2 - RM, +WIDTH/2 + RM, -HEIGHT/2 - RM),
)
)
def gcode():
gcode = GCode()
gcode.SPINDLE_SPEED = 1000
gcode.FEED_RATE_XY = 120
gcode.FEED_RATE_Z = 60
gcode.Z_UP = 3
gcode.Z_THRESHOLD = 1
gcode.Z_FROM = 0
gcode.Z_DOWN = -THICK-1
gcode.Z_STEP = 0.02
return gcode
def main():
for e in traverse(milling_program(gcode())):
print(e)
if __name__ == '__main__':
main()
|
[
"igor.a.karpov@gmail.com"
] |
igor.a.karpov@gmail.com
|
0b026b7588cfd52cc92d6fd76b2985618ef2f533
|
60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14
|
/beginner_contest/175/C.py
|
430190fbe6f342eff4aae54d28abe6bb704ad2fd
|
[
"MIT"
] |
permissive
|
FGtatsuro/myatcoder
|
12a9daafc88efbb60fc0cd8840e594500fc3ee55
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
refs/heads/master
| 2021-06-13T15:24:07.906742
| 2021-05-16T11:47:09
| 2021-05-16T11:47:09
| 195,441,531
| 0
| 0
|
MIT
| 2021-05-16T11:47:10
| 2019-07-05T16:47:58
|
Python
|
UTF-8
|
Python
| false
| false
| 331
|
py
|
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
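# ABC175 C: start at |x| and take k steps of size d toward 0. If 0 would be
# overshot, the remaining k - x//d steps oscillate between x % d and x % d - d,
# so the parity of the remaining steps picks the reachable minimum.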
x, k, d = map(int, input().split())
x = abs(x)
if x - (k * d) >= 0:
print(x - (k * d))
sys.exit(0)
else:
remain = k - (x // d)
p_min = x % d
    n_min = abs(p_min - d)
if remain % 2 == 0:
print(p_min)
else:
print(n_min)
|
[
"204491+FGtatsuro@users.noreply.github.com"
] |
204491+FGtatsuro@users.noreply.github.com
|
30a9eba4e06419640b6d697d0f49f48aa0214e5b
|
4b772fc943e19d1418b49bc179b2e2e307ba81b5
|
/cigen/root_page_parser.py
|
5ea1e738681c0449b607d4c239a5b11df31c0cdd
|
[] |
no_license
|
mingming1248/kit
|
da0adb356ccb76a4b4e6a1faaa5b59faf38339f1
|
9af35fc9fdcf58bd0133a6f22d095c4e998a2643
|
refs/heads/master
| 2022-11-07T22:00:37.867470
| 2020-06-25T16:08:29
| 2020-06-25T16:08:29
| 274,893,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,451
|
py
|
import requests
from bs4 import BeautifulSoup
import fake_useragent
import json
import os
# Note: the root "con" cannot be saved under its own name on Windows, because
# Windows does not allow "con" as a file name.
# See http://iknow.lenovo.com.cn/detail/dc_154023.html
def page_parser(root,link,left_for_download):
root_page=requests.get(link,headers={"User-Agent": fake_useragent.UserAgent().random})
soup=BeautifulSoup(root_page.text,"html.parser")
root_explains=soup.find_all("div",class_="wdef")
    # A page has at most 5 "wdef" blocks: root meanings, word meanings, derived
    # words, synonyms and example sentences; some may be missing.
    # Derived words and synonyms are irregularly formatted, so they are not extracted.
cigen_list=[]
ciyi_list=[]
liju_list=[]
    # The first block holds the meanings as a root; it is never missing.
    cigens=root_explains[0].find_all("ol")  # a root may have several meanings, one per <ol> tag
for cigen in cigens:
        examples=[]  # examples for this root
        meaning=""  # without this line, meaning would exist only inside try and finally could not see it
        try:  # some roots have no explanation or examples; skip them, the except needs no handling
            meaning=cigen.li.p.strong.get_text()  # the meaning of the root
            examples_tag=cigen.li.ul.find_all("li")  # tags holding the root's examples
            for example_tag in examples_tag:
                strings=example_tag.stripped_strings  # every contained string, as a generator
                for s in strings:
                    examples.append(s)  # collect the root's examples
        except AttributeError:
            pass
        finally:
            cigen_list.append([meaning,examples])  # store [meaning, examples] for this root in cigen_list
    # The second block holds the meanings as a standalone word; it may be missing.
    if len(root_explains)>1:
        ciyis=root_explains[1].find_all("p")  # a word may have several meanings, one per <p> tag
for ciyi in ciyis:
strings=ciyi.stripped_strings
for s in strings:
                ciyi_list.append(s)  # extracting a meaning is simple: just take the text inside the <p> tag
    # If there are more than 2 wdef blocks (never more than 5), the last one holds example sentences; it may be missing.
    if len(root_explains)>2:
        lijus=root_explains[-1].find_all("li")  # there are several example sentences, one per <li> tag
for liju in lijus:
strings=liju.stripped_strings
for s in strings:
                liju_list.append(s)  # extracting an example sentence is simple: just take the text inside the <li> tag
if root=="con":
con_file=root+"n"
with open(f"{con_file}.json","w",encoding="utf8") as file:
root_dict={}
root_dict[root]=[cigen_list,ciyi_list,liju_list]
            json.dump(root_dict,file,ensure_ascii=False)  # without ensure_ascii=False, dump would escape non-ASCII
else:
with open(f"{root}.json","w",encoding="utf8") as file:
root_dict={}
root_dict[root]=[cigen_list,ciyi_list,liju_list]
            json.dump(root_dict,file,ensure_ascii=False)  # without ensure_ascii=False, dump would escape non-ASCII
return left_for_download-1
def root_page_parser():
with open("root_dict.json","r") as file:
root_dict=json.load(file)
# root_dict={"bene":"http://www.cgdict.com/index.php?app=cigen&ac=word&w=bene",
# "amphi":"http://www.cgdict.com/index.php?app=cigen&ac=word&w=amphi",#没有例句
# "ad":"http://www.cgdict.com/index.php?app=cigen&ac=word&w=ad" #词根没有举例
# } #测试使用
left_for_download=len(root_dict)
if not os.path.exists("download"):
os.mkdir("download")
os.chdir("download")
for root,link in root_dict.items():
print(f"{left_for_download}")
        fname = f"{root}n.json" if root == "con" else f"{root}.json"  # "con" is saved as "conn.json" (Windows reserved name)
        if os.path.exists(fname):  # skip roots that were already downloaded
left_for_download-=1
continue
left_for_download=page_parser(root,link,left_for_download)
print("over!")
os.chdir("../")
if __name__=="__main__":
root_page_parser()
|
[
"humm5@mail2.sysu.edu.cn"
] |
humm5@mail2.sysu.edu.cn
|
e6b1fde633b8fe653ad8fd98a145071363df9d07
|
8c946bf0b3d28fb5c37423afded462d971ffaf4a
|
/testcases/multi_instrument_strategy_test.py
|
1a746f810231a2e3b8bef378b643ceaa2942ee5a
|
[
"Apache-2.0"
] |
permissive
|
AdaJass/pyalgotrade3
|
080bf6ee55120c9bd28cd3be1a9395419e68d3f8
|
5481c214891b7bd8ca76fee4e3f5cfcbb9f9de4e
|
refs/heads/master
| 2020-12-31T00:17:20.816028
| 2017-03-29T10:38:36
| 2017-03-29T10:38:36
| 86,557,369
| 25
| 12
| null | 2017-03-29T10:15:23
| 2017-03-29T08:31:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,814
|
py
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
from . import common
from pyalgotrade import bar
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.barfeed import sqlitefeed
from pyalgotrade import marketsession
from pyalgotrade import strategy
from pyalgotrade.technical import ma
from pyalgotrade.technical import cross
class NikkeiSpyStrategy(strategy.BacktestingStrategy):
def __init__(self, feed, smaPeriod):
strategy.BacktestingStrategy.__init__(self, feed)
assert(smaPeriod > 3)
self.__lead = "^n225"
self.__lag = "spy"
self.__adjClose = feed[self.__lead].getAdjCloseDataSeries()
# Exit signal is more sensitive than entry.
self.__fastSMA = ma.SMA(self.__adjClose, int(smaPeriod/2))
self.__slowSMA = ma.SMA(self.__adjClose, smaPeriod)
self.__pos = None
def onEnterCanceled(self, position):
assert(position == self.__pos)
self.__pos = None
def onExitOk(self, position):
assert(position == self.__pos)
self.__pos = None
def __calculatePosSize(self):
cash = self.getBroker().getCash()
lastPrice = self.getFeed()[self.__lag][-1].getClose()
ret = cash / lastPrice
return int(ret)
def onBars(self, bars):
if bars.getBar(self.__lead):
if cross.cross_above(self.__adjClose, self.__slowSMA) == 1 and self.__pos is None:
shares = self.__calculatePosSize()
if shares:
self.__pos = self.enterLong(self.__lag, shares)
elif cross.cross_below(self.__adjClose, self.__fastSMA) == 1 and self.__pos is not None:
self.__pos.exitMarket()
class TestCase(common.TestCase):
def __testDifferentTimezonesImpl(self, feed):
self.assertTrue("^n225" in feed)
self.assertTrue("spy" in feed)
self.assertTrue("cacho" not in feed)
strat = NikkeiSpyStrategy(feed, 34)
strat.run()
self.assertEqual(round(strat.getResult(), 2), 1033854.48)
def testDifferentTimezones(self):
# Market times in UTC:
# - TSE: 0hs ~ 6hs
# - US: 14:30hs ~ 21hs
feed = yahoofeed.Feed()
for year in [2010, 2011]:
feed.addBarsFromCSV("^n225", common.get_data_file_path("nikkei-%d-yahoofinance.csv" % year), marketsession.TSE.getTimezone())
feed.addBarsFromCSV("spy", common.get_data_file_path("spy-%d-yahoofinance.csv" % year), marketsession.USEquities.getTimezone())
self.__testDifferentTimezonesImpl(feed)
def testDifferentTimezones_DBFeed(self):
feed = sqlitefeed.Feed(common.get_data_file_path("multiinstrument.sqlite"), bar.Frequency.DAY)
feed.loadBars("^n225")
feed.loadBars("spy")
self.__testDifferentTimezonesImpl(feed)
def testDifferentTimezones_DBFeed_LocalizedBars(self):
feed = sqlitefeed.Feed(common.get_data_file_path("multiinstrument.sqlite"), bar.Frequency.DAY)
feed.loadBars("^n225", marketsession.TSE.getTimezone())
feed.loadBars("spy", marketsession.USEquities.getTimezone())
self.__testDifferentTimezonesImpl(feed)
|
[
"Jass Ada"
] |
Jass Ada
|
1b4deeac8b436d185cefcdc653d410ec5470ab1a
|
468c568be3969a0a1dfec0a7505d682a693d5b87
|
/rollback-to-deadline.py
|
793d2e91bc0e75291cf38a2c98ec4624b74fbc38
|
[
"MIT"
] |
permissive
|
apanangadan/autograde-github-classroom
|
0ffc4e3c21e430e3ce06758930dba5e6453ed7ba
|
04a64beb2c1a376fd3f5a6006d1ec5a750bf8fea
|
refs/heads/master
| 2020-03-20T00:17:46.255411
| 2018-06-14T18:14:34
| 2018-06-14T18:14:34
| 137,038,837
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
#! /usr/bin/env python3
#Acknowledgment: https://github.com/Sirusblk
import os, shutil, subprocess
from argparse import ArgumentParser
DATE_STRING = '2018-02-18 01:00 PST'
#DATE_STRING = '2018-04-30 01:00 PST'
def rollback_one_submission(subdirectory):
print(subdirectory)
os.chdir(subdirectory)
    # check out the last commit on master made before DATE_STRING
command = 'git checkout `git rev-list -n 1 --before=\"' + DATE_STRING + '\" master`'
os.system(command)
os.chdir('..')
def rollback_all():
""" Create parser for command line arguments """
parser = ArgumentParser(
usage=u'python -m rollback-to-deadline -h',
description=' rollback all submissions to last commit before assignment deadline')
parser.add_argument('-d', '--dest', help=u'Destination directory containing submissions [curr_dir]')
parser.add_argument('--deadline', default=DATE_STRING, help=u'Assignment deadline in git date format [' + DATE_STRING +']')
args = parser.parse_args()
if args.dest:
os.chdir(args.dest)
for entry in sorted(os.scandir('.'),
key = lambda e: e.name):
name = entry.name
if entry.is_dir():
rollback_one_submission(name)
if __name__ == '__main__':
rollback_all()
|
[
"apanangadan@fullerton.edu"
] |
apanangadan@fullerton.edu
|
80458fb88d250a212b89c6b9626dcd8c9c897bd3
|
eeee1acf033919d3e50af74792b1bca1c9296665
|
/Hybrid_TimeOverR.py
|
b39a6268f10287db503c4cd70bbcb6b64ff6d43f
|
[] |
no_license
|
baiqiushi/LimitDB
|
35959cdeddf7fb7658151cbc146281d142310a09
|
a05a1b2da6c6dfe3606a71e8d1d76d75cb81259a
|
refs/heads/master
| 2020-03-23T20:39:00.541794
| 2018-10-23T03:39:43
| 2018-10-23T03:39:43
| 142,053,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,365
|
py
|
# Time Over R
# For a given k absolute value, plot T-r curves of different keywords in once canvas
import matplotlib
matplotlib.use('Agg')
import time
import Conf
import DatabaseFactory
import KeywordsUtil
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
###########################################################
# Configurations
###########################################################
database = Conf.DBTYPE
tableName = Conf.TABLE
db = DatabaseFactory.getDatabase(database)
# From what frequency, choose keywords
frequencies = [20000, 30000, 40000, 50000]
# For each frequency, how many keywords we choose
numOfKeywords = 10
# Choose keywords with different frequencies
keywords = []
for freq in frequencies:
keywords.extend(KeywordsUtil.pickNearestKeywordToFrequency(freq, numOfKeywords))
print keywords
# keywords = [('job', 495)]
k_values = [10000, 15000, 20000, 25000, 30000, 35000, 40000] # range(5000, 100000, 5000)
r_percentages = range(10, 110, 10)
r_labels = map(lambda rp: str(rp) + '%', r_percentages)
# plot given p_curves into one image
# p_labels: a list of labels with same order of list of curves in p_curves, format ['label0', 'label1', ..., 'labelN']
# p_x: a list of x axis values with same order of list of curves in p_curves, format [x0, x1, x2, ..., xM]
# p_curves: in the format as [[y0, y1, y2, ..., yM](curve0), [y0, y1, y2, ..., yM](curve1), ..., (curveN)]
# a list of list, totally N curves, for each curve there're M values
# p_x_label: label for x axis
# p_y_label: label for y axis
def plotCurves(p_fileName, p_labels, p_x, p_curves, p_x_label, p_y_label, p_title, p_showLegend=True):
pp = PdfPages(p_fileName + '.pdf')
plt.figure()
n = 0
for i_label in p_labels:
plt.plot(p_x, p_curves[n], label=i_label)
n += 1
plt.xlabel(p_x_label)
plt.ylabel(p_y_label)
plt.title(p_title)
plt.grid(True)
if p_showLegend:
plt.legend()
plt.savefig(pp, format='pdf')
pp.close()
###########################################################
# Run Script
###########################################################
print '================================================='
print ' ' + database + ' Experiment - Time over r value '
print '- Hybrid approach'
print '================================================='
print 'table:', tableName
print 'keywords:', keywords
print 'k_values:', k_values
print 'r_percentage:', r_labels
print '-------------------------------------------------'
start = time.time()
# 1. For each r value:
# For each k value:
# For each keyword, run hybrid query:
# Send dummy query
# Get the execution time of the query
# Time Dictionary stores for each keyword a 2D array, with i->r, j->k, and [i][j]->Time
# {'soccer': [[t(r=R0,k=K0), t(r=R0,k=K1), ...], [t(r=R1,k=K0), t(r=R1,k=K1), ...]], 'rain': [[...]]}
times = {}
for keyword in keywords:
times[keyword[0]] = []
m = 0
for row in r_percentages:
times[keyword[0]].append([])
for col in k_values:
times[keyword[0]][m].append(0)
m += 1
print times
progress = 0
t0 = time.time()
i = 0
for r_p in r_percentages:
print 'Processing r =', str(r_p) + '% ...'
j = 0
for k in k_values:
print ' Processing k =', str(k) + ' ...'
for keyword in keywords:
# Send a dummy query
db.queryDummy()
l_random_r = float(r_p) / 100.0
l_limit_k = k
t_start = time.time()
l_coordinates_hybrid = db.GetCoordinateHybrid(tableName, keyword[0], l_random_r, l_limit_k)
t_end = time.time()
times[keyword[0]][i][j] = t_end - t_start
progress += 1
print '[Total time]', time.time() - t0, \
'[Progress]', str(progress * 100 / (len(keywords) * len(k_values) * len(r_percentages))) + '%'
j += 1
i += 1
print times
# 3. Plot the T-r curves of different keywords in one canvas per k value
print 'Plotting images ...'
for i in range(0, len(k_values), 1):
k = k_values[i]
i_fileName_head = 'k=' + str(k)
# (1) Plot T-r curves of different keywords
i_fileName = i_fileName_head + '_t_r'
i_x = r_percentages
i_labels = []
i_curves = []
print 'keywords:'
for keyword in keywords:
if keyword[1] * 0.9 <= k:
continue
print keyword[0]
i_labels.append(keyword[0] + ':' + str(keyword[1]))
i_curve = np.array(times[keyword[0]])[:, i]
i_curves.append(i_curve)
print i_curves
print 'i_labels:'
print i_labels
i_x_label = 'Random r(%)'
i_y_label = 'Execution Time(s)'
i_title = 'k=' + str(k) + ' - T-r curves of different keywords'
print 'Plotting', i_title
plotCurves(i_fileName, i_labels, i_x, i_curves, i_x_label, i_y_label, i_title)
end = time.time()
print '================================================='
print ' ' + database + ' Experiments - Time over r value '
print '- Hybrid approach'
print '================================================='
print 'table:', tableName
print 'keywords:', keywords
print 'k_values:', k_values
print 'r_percentage:', r_labels
print '-------------------------------------------------'
print 'Finished!', end - start, 'seconds spent.'
|
[
"baiqiushi@gmail.com"
] |
baiqiushi@gmail.com
|
72a0666b7f3a7b98999287ded2a6278c901c35f1
|
c0465b63b853d062fda02bbcce5415e73a58219e
|
/bin/NMF/find_best_RFparams.py
|
8a8faf5d1647df9676b4eb062091a1574b61912d
|
[
"MIT"
] |
permissive
|
hustlc/PipeOne
|
14c0163a90a7cffc810d8ac7a3d2d8a617976cb8
|
4a91a16303818b60e88c3a7defee2155ec329800
|
refs/heads/master
| 2023-03-31T02:37:28.922321
| 2021-03-26T02:11:41
| 2021-03-26T02:11:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,259
|
py
|
from feature_selection_utils import *
from joblib import Parallel, delayed
import fire
def find_best_params(X, y, params_grid, record_fi):
if os.path.exists(record_fi):
os.remove(record_fi)
acc_record = pd.DataFrame(columns=["tree_num","max_depth","min_samples_split","acc"] )
with open(record_fi, "a+") as fout:
all_params = list(ParameterGrid(params_grid) )
#all_params_df = pd.DataFrame(ParameterGrid(params_grid) )
res = Parallel(n_jobs=24)(delayed(loo_validation)(params_['tree_num'],
params_["max_depth"],
params_['min_samples_split'], X, y) for params_ in all_params)
acc_all, train_acc_all, sensitivity_all, specificity_all = zip(*res)
for acc, train_acc, sensitivity, specificity,params in zip(acc_all, train_acc_all, sensitivity_all, specificity_all, all_params):
#acc, train_acc, sensitivity, specificity = loo_validation(params["tree_num"], params["max_depth"],
# params["min_samples_split"], X, y)
# line = "In tree_num={:d}, max_depth={:d}, min_samples_split={:d}, train_acc={:.4f}, loo_acc={:.4f}, sensitivity={:.4f}, specificity={:.4f}".format(
# params["tree_num"], params["max_depth"], params["min_samples_split"], train_acc, acc, sensitivity,
# specificity)
line = "In tree_num={:d}, max_depth={:d}, min_samples_split={:d}, train_acc={:.4f}, loo_acc={:.4f}".format(
params["tree_num"], params["max_depth"], params["min_samples_split"], train_acc, acc)
#print(line)
fout.write(line + "\n")
acc_record_ = pd.DataFrame({"tree_num": [params["tree_num"]],
"max_depth": [params["max_depth"]],
"min_samples_split":[params["min_samples_split"]],
"acc": [acc]
} )
acc_record= acc_record.append(acc_record_, ignore_index=True)
'''
acc_record_2 = pd.DataFrame({"tree_num": all_params_df["tree_num"],
"max_depth": all_params_df["max_depth"],
"min_samples_split": all_params_df["min_samples_split"],
"acc": acc_all,
"sensitivity": sensitivity_all,
"specificity": specificity_all } )
'''
acc_record = acc_record.sort_values(by=['acc'], ascending=False)
acc_record = acc_record.reset_index(drop=True)
#acc_record_2 = acc_record_2.sort_values(by=["sensitivity", "specificity", 'acc'], ascending=False, ignore_index=True)
best_tree_num = acc_record.loc[0,"tree_num"]
best_max_depth = acc_record.loc[0,"max_depth"]
best_min_samples_split = acc_record.loc[0,"min_samples_split"]
best_acc = acc_record.loc[0,"acc"]
acc_record.to_csv("rf_my_record.csv", index=False)
#acc_record_2.to_csv("rf_my_record_2.csv", index=False)
print(best_tree_num, best_max_depth, best_min_samples_split, best_acc)
return best_tree_num, best_max_depth, best_min_samples_split, best_acc
def save_feature_importance(X, y, feature_name, tree_num, max_depth, min_samples_split, fout):
feature_importance = get_feature_importance(tree_num, max_depth, min_samples_split, X, y)
feature_importance_dict = {"feature_name": feature_name, "feature_importance": feature_importance}
feature_importance_df = pd.DataFrame.from_dict(feature_importance_dict)
feature_importance_df.to_csv(fout, index=None)
return feature_importance
def myRF(ddir = "./data_randomForest", tdir = "./FeatureSelection/"):
ddir = ddir.rstrip("/") + "/"
tdir = tdir.rstrip('/') + "/"
#tdir = "./FeatureSelection/"
chck_dir(tdir)
#data_assemble = ["top50", "top100", "top200", "proc"]
data_assemble = ["proc"]
#data_assemble = ["top50", "top100", "top200"]
data_dir = ["%s/%s/" % (ddir, s) for s in data_assemble]
record_fi = [tdir + "%s_RF_params_setting_record.txt" % [s, "all"][s == "proc"] for s in data_assemble]
feature_imp_fi = [tdir + "feature(%s)_importance.csv" % [s, "all"][s == "proc"] for s in data_assemble]
params_grid = {"tree_num": [3, 5, 7, 10, 20, 30, 50, 100],
"max_depth": [2, 3, 4, 7, 10],
"min_samples_split": [2, 3, 4, 5, 7]
}
best_params_record = {"data_dir": [], "tree_num": [],
"max_depth": [], "min_samples_split": [],
"best_loo_acc": []}
topk_for_eval = [10, 20, 50, 100, 200]
topk_for_eval_record = []
for i in range(len(data_assemble)):
X, y, feature_name = load_data(data_dir[i])
res = find_best_params(X, y, params_grid, record_fi[i] )
# res = [10,3,4,1]
best_tree_num = res[0]
best_max_depth = res[1]
best_min_samples_split = res[2]
best_acc = res[3]
best_params_record["data_dir"].append([data_assemble[i], "all"][data_assemble[i] == "proc"])
best_params_record["tree_num"].append(best_tree_num)
best_params_record["max_depth"].append(best_max_depth)
best_params_record["min_samples_split"].append(best_min_samples_split)
best_params_record["best_loo_acc"].append(best_acc)
feature_imp = save_feature_importance(X, y, feature_name,
best_tree_num, best_max_depth,
best_min_samples_split, feature_imp_fi[i])
# eval classification ability of top k features computed by RF
# feature_imp = load_feature_imp(feature_imp_fi[i])
for topk in topk_for_eval:
X_imp, new_feature_name = select_top_imp_feature(X, feature_imp, feature_name, topk)
fout = tdir + "feature(%s)_importance_top%d_for_retraining.csv" \
% ([data_assemble[i], "all"][data_assemble[i] == "proc"], topk)
acc, train_acc, sensitivity, specificity = eval_topk_RF_feature(X_imp, y, new_feature_name, best_tree_num,
best_max_depth, best_min_samples_split, fout)
line = "In {:s}, set tree_num={:d}, max_depth={:d}, min_samples_split={:d}, " \
"select top{:d}, train_acc={:.4f}, loo_acc={:.4f}, ".format(
feature_imp_fi[i], best_tree_num, best_max_depth, best_min_samples_split,
topk, train_acc, acc)
topk_for_eval_record.append(line)
best_params_record_df = pd.DataFrame.from_dict(best_params_record)
best_params_record_df.to_csv(tdir + "RF_best_params_settings_for_feature_selection.csv", index=False)
topk_for_eval_fout = tdir + "eval_RF_topk_features.txt"
if os.path.exists(topk_for_eval_fout):
os.remove(topk_for_eval_fout)
topk_for_eval_record = [line + "\n" for line in topk_for_eval_record]
with open(topk_for_eval_fout, "a+") as f:
f.writelines(topk_for_eval_record)
if __name__ == '__main__':
fire.Fire(myRF)
|
[
"523135753@qq.com"
] |
523135753@qq.com
|
a4c07538ad3e73932b9952ec586b4714886d058d
|
de9547189c01a0c33c4c4305e0f53c407ab84f40
|
/base/controller.py
|
18a70479b55b221abf470dded09fe6c28313f568
|
[] |
no_license
|
Uskrai/alpro-tubes
|
15e43cea9fb666b124ae75b0afc830e2727cad36
|
26a3a087f3914d142994f88a808a3c8baba128c9
|
refs/heads/master
| 2023-02-16T15:03:43.569348
| 2021-01-14T03:55:10
| 2021-01-14T04:06:28
| 320,435,843
| 0
| 6
| null | 2021-01-13T06:43:14
| 2020-12-11T01:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 185
|
py
|
import abc
class ControllerBase(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def __init__(self):
        pass
    def start(self, modul: dict):
        pass
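# A minimal usage sketch (hypothetical subclass, not part of the original module):
# class MenuController(ControllerBase):
#     def __init__(self):
#         self.moduls = {}
#     def start(self, modul: dict):
#         print(modul)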
|
[
"30656271+Uskrai@users.noreply.github.com"
] |
30656271+Uskrai@users.noreply.github.com
|
b405ec95a0255c9881fb8fb9f7e040f3fbe5d43f
|
77f18fee771336a4d86aac6043004df8c4856e58
|
/Projects/bal.py
|
dd726de8c4b645716d44bcba615b25d16bcd30ae
|
[
"MIT"
] |
permissive
|
AamodJ/manim
|
40540d93bf6d736ae35e62ddc0a5cd02396500bd
|
0fc62317ff7ecccb09dbed4011dbe221ba869ea6
|
refs/heads/master
| 2022-12-17T21:03:47.871005
| 2020-09-15T18:18:05
| 2020-09-15T18:18:05
| 197,820,247
| 0
| 0
|
NOASSERTION
| 2019-07-19T18:00:30
| 2019-07-19T18:00:29
| null |
UTF-8
|
Python
| false
| false
| 2,373
|
py
|
# from big_ol_pile_of_manim_imports import *
"""
Update the code for newer version of manim
"""
from manimlib.imports import *
class Ball(Circle):
CONFIG = {
"radius": 0.4,
"fill_color": BLUE,
"fill_opacity": 1,
"color": BLUE
}
def __init__(self, ** kwargs):
Circle.__init__(self, ** kwargs)
self.velocity = np.array((2, 0, 0))
def get_top(self):
return self.get_center()[1] + self.radius
def get_bottom(self):
return self.get_center()[1] - self.radius
def get_right_edge(self):
return self.get_center()[0] + self.radius
def get_left_edge(self):
return self.get_center()[0] - self.radius
class Box(Rectangle):
CONFIG = {
"height": 6,
"width": FRAME_WIDTH - 2,
"color": GREEN_C
}
def __init__(self, ** kwargs):
Rectangle.__init__(self, ** kwargs) # Edges
self.top = 0.5 * self.height
self.bottom = -0.5 * self.height
self.right_edge = 0.5 * self.width
self.left_edge = -0.5 * self.width
class ContinualBallUpdate(Animation):
def __init__(self, ball, box):
self.ball = ball
self.box = box
Animation.__init__(self, ball)
def update_mobject(self, dt):
self.ball.acceleration = np.array((0, -5, 0))
self.ball.velocity = self.ball.velocity + self.ball.acceleration * dt
self.ball.shift(self.ball.velocity * dt) # Bounce off ground and roof
if self.ball.get_bottom() <= self.box.bottom or \
self.ball.get_top() >= self.box.top:
self.ball.velocity[1] = -self.ball.velocity[1]
# Bounce off walls
if self.ball.get_left_edge() <= self.box.left_edge or \
self.ball.get_right_edge() >= self.box.right_edge:
self.ball.velocity[0] = -self.ball.velocity[0]
class Bouncing_Ball(Scene):
def construct(self):
self.box = Box()
ball = Ball()
self.play(FadeIn(self.box))
self.play(FadeIn(ball))
self.ball = ball
self.add(ContinualBallUpdate(ball, self.box))
text1 = TextMobject("This is a bouncing ball")
text2 = TextMobject("Enjoy watching!")
self.wait(1)
self.play(FadeIn(text1))
self.wait(2)
self.play(Transform(text1, text2))
self.wait(10)
|
[
"aamodj23@gmail.com"
] |
aamodj23@gmail.com
|
d1f375720e2a80d9662c995e7e56fc921b74c78a
|
bdcbbf3730586631a67ba2ad397379c43370c718
|
/test_sample_functions.py
|
d9210c3ba8825dbfb727f775fde6c413cfd7d602
|
[] |
no_license
|
bklingen-calpoly/python_unittests
|
49ac73d9019e9392dec8a301685e5c3c98f57aff
|
6361cb272d034c423aff0a3f96abc0d87d390fc1
|
refs/heads/main
| 2023-08-29T01:53:17.260219
| 2021-11-09T18:35:08
| 2021-11-09T18:35:08
| 329,997,126
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import pytest
import sample_functions
def test_sum():
num1 = 5
num2 = 10
assert sample_functions.sum(num1, num2) == 15
def test_contains_numbers():
input_str = "el12lk3j5mnfadf"
assert sample_functions.contains_numbers(input_str) == True
def test_does_not_contain_numbers():
input_str = "lkqwjqlkjlkjed"
assert sample_functions.contains_numbers(input_str) == False
def test_div():
num1 = 10
num2 = 5
expected = 2
assert sample_functions.div(num1, num2) == expected
def test_div_by_zero():
num1 = 10
num2 = 0
with pytest.raises(ZeroDivisionError):
sample_functions.div(num1, num2)
# Create separate and independent test cases
# For instance, avoid this:
def test_div2():
num1 = 10
num2 = 5
expected = 2
assert sample_functions.div(num1, num2) == expected
num2 = 0
with pytest.raises(ZeroDivisionError):
sample_functions.div(num1, num2)
# Two aspects being tested under the same test case. Not a good practice.
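# The preferred version is the pair of focused tests above (test_div and
# test_div_by_zero), each of which exercises exactly one behavior.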
|
[
"bklingen@calpoly.edu"
] |
bklingen@calpoly.edu
|
b51a35e2727816f5a9ed2e9f5efa29b51e6e40db
|
b14abc368251925fe5afe51fafb88d99d0814f87
|
/w.py
|
8e7c6ee984eb857f524fcd0ea9f2a2895eb48930
|
[] |
no_license
|
altoenergy/sledge
|
737e90a9847f9df9a7dea102c739a5f62e7e7503
|
2dc872739dadf0fb1d93b80032bf42b93e6fc28e
|
refs/heads/master
| 2021-01-16T00:17:46.952381
| 2013-07-30T12:39:38
| 2013-07-30T12:39:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
import math
import numpy as np
import portfolio as ptf
def init_f(size):
initF_ = np.empty(size)
initF_.fill(1.0 / size)
return initF_
def eval_w(prevF_, x__, w__, wParams):
iMax = np.size(prevF_)
f_ = np.empty(iMax)
wEval = wParams['eval']
if (wEval == 'exp'):
for i in range(iMax):
# x__[i, -1] = prevF_[i]
x__[i][-1] = prevF_[i]
f_[i] = math.exp(np.dot(w__[i], x__[i]))
F_ = f_ / np.sum(f_)
elif (wEval == 'pv'):
for i in range(iMax):
x__[i, -1] = prevF_[i]
f_[i] = np.dot(w__[i], x__[i])
m = 1.0 / iMax
F_ = np.minimum(m, np.maximum(-m, f_))
else:
raise InputError("wEval %s unknown" % wEval)
return F_
def run_w(portfolio, w__, wParams):
F__ = np.empty([portfolio.tMax, portfolio.iMax])
F__[0] = init_f(portfolio.iMax)
for t in range(1, portfolio.tMax):
F__[t] = eval_w(F__[t - 1], portfolio.x___[t], w__, wParams)
return F__
def run_W(portfolio, W_, wParams):
return run_w(portfolio, portfolio.split(W_), wParams)
def init(size, wParams):
wInit = wParams['init']
if (wInit == 'rand'):
return math.sqrt(3.0 / size) * (2 * np.array(np.random.random(size)) - 1)
elif (wInit == 'es'):
return np.array([-17.8315784643401, 7.54831415172555, 7.10669245771246, 3.08458031669017,
-18.0719442410111, 10.4235863115538, 9.09097757214456, 3.27278753890559,
-14.7971106946605, 0.803001968346536, 0.649026283159846, -3.4110717061195])
elif (wInit == 'pv'):
return (2 * np.array(np.random.random(size)) - 1) / size
else:
raise InputError("wInit %s unknown" % wInit)
|
[
"paul.varnish@altoenergy.com"
] |
paul.varnish@altoenergy.com
|
bba4df0d747d31dec7d8f7945862c9049f93d47e
|
58963c1f56c4828e56e53deece68fca3a83db921
|
/products/migrations/0003_auto_20190524_1335.py
|
e4cabc68dbb725038c5c31da469c5458f29b1278
|
[
"MIT"
] |
permissive
|
vitali-r/students-lab
|
5157b6c66213a4275adfd7b507c0f4ee1c8013a0
|
574ad0249ee40b799a2e8faaced3661915bee756
|
refs/heads/master
| 2022-05-11T17:22:30.876609
| 2019-06-13T10:19:37
| 2019-06-13T10:19:37
| 187,835,718
| 0
| 0
|
MIT
| 2022-04-22T21:16:56
| 2019-05-21T12:46:27
|
CSS
|
UTF-8
|
Python
| false
| false
| 503
|
py
|
# Generated by Django 2.2.1 on 2019-05-24 10:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0002_auto_20190524_1316'),
]
operations = [
migrations.AlterField(
model_name='product',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='products.Category'),
),
]
|
[
"Viachaslau.Pasedzka@itechart-group.com"
] |
Viachaslau.Pasedzka@itechart-group.com
|
c97b9a9eb8da39d90c6f7da18b80bbdae55369b5
|
b3cebd6c0eb8573a076e50281e49d9ac7953c003
|
/PigLatin.py
|
273d37bed6cb7a3381fc731a08d61a14e53ac29a
|
[] |
no_license
|
Savvasun/PigLatin
|
9264fcd1ce24e8fc0428696a31ed25917b61e420
|
cb91b837800837eab515f507d49219d9cbc61329
|
refs/heads/master
| 2022-10-21T21:54:28.513976
| 2020-06-03T17:58:55
| 2020-06-03T17:58:55
| 256,293,614
| 1
| 0
| null | 2020-06-03T17:12:15
| 2020-04-16T18:12:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,769
|
py
|
#This prompts you to enter your message in English.
#It also declares some variables.
text = input("Type your message in english: ")
words = text.split()
print(words)
mode = input("Do you want simple Pig Latin (simple), custom Pig Latin (custom), or classic Pig Latin? (classic): ")
vowels = "aeiou"
vows = list(vowels)
consonants = "bcdfghjklmnpqrstvwxyz"
cons = list(consonants)
moveLet = 0
answer = ""
if (mode == "custom"):
#This mode runs if "custom" is chosen.
#These prompt you to chose how many letters you want moved and what to add at the end.
num = int(input("How many of the first letters of the word do you want moved?: "))
textad = input("What text do you want to be added at the end of each word?: ")
for i in range(len(words)):
#Here is the loop that generates the words.
word = words[i]
answer += word[num:] + word[:num] + textad + " "
if (mode == "simple"):
#This mode runs if "simple" is chosen
#It simply moves two letters and adds an "ay"
for i in range(len(words)):
word = words[i]
answer += word[2:] + word[:2] + "ay" + " "
if (mode == "classic"):
#This mode runs if "classic" is chosen.
    for i in range(len(words)):
        #This loop iterates through each word.
        word = words[i]
        print(word)
        moveLet = 0
        for e in range(len(word) - 1):
            #This loop scans for the first consonant that is followed by a vowel.
            #range(len(word) - 1) keeps word[e + 1] in bounds at the last letter.
            print(word[e])
            if (word[e] in cons and word[e + 1] in vows):
                moveLet = e + 1
                print("MoveLet is: " + word[moveLet])
                break
        answer += word[moveLet:] + word[:moveLet] + "ay" + " "
print(answer)
input("Press ENTER to exit the program.")
|
[
"savvazhukov2008@gmail.com"
] |
savvazhukov2008@gmail.com
|
a63b0abfeeda03300295f742666770391b977866
|
b7f6302fb73b28eb6830277feb8b5d6eda5f9db1
|
/python/curl.py
|
c0def548737d34c449227c13f5a902a91f661c9e
|
[] |
no_license
|
chengongliang/Linux
|
47a0b4d6d6b205b8b240146d91659dc4829aa9a9
|
61dcde713984c33c1ac68fc47bbd058a2735361b
|
refs/heads/master
| 2020-04-06T06:03:12.658382
| 2019-07-10T03:39:20
| 2019-07-10T03:39:20
| 47,671,838
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,105
|
py
|
#!/usr/bin/env python
#coding:utf8
import os
import sys
import pycurl
def main(url):
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.CONNECTTIMEOUT, 5)
c.setopt(pycurl.TIMEOUT, 5)
c.setopt(pycurl.NOPROGRESS, 1)
c.setopt(pycurl.FORBID_REUSE, 1)
c.setopt(pycurl.MAXREDIRS, 1)
c.setopt(pycurl.DNS_CACHE_TIMEOUT, 30)
indexfile = open(os.path.dirname(os.path.realpath(__file__)) + "/content.txt", "wb")
c.setopt(pycurl.WRITEHEADER, indexfile)
c.setopt(pycurl.WRITEDATA, indexfile)
try:
c.perform()
except Exception, e:
print "connection error: " + str(e)
indexfile.close()
c.close()
sys.exit()
    NAMELOOKUP_TIME = c.getinfo(c.NAMELOOKUP_TIME) # DNS resolution time
    CONNECT_TIME = c.getinfo(c.CONNECT_TIME) # time to establish the connection
    PRETRANSFER_TIME = c.getinfo(c.PRETRANSFER_TIME) # time from connection established until ready to transfer
    STARTTRANSFER_TIME = c.getinfo(c.STARTTRANSFER_TIME) # time from connection established until transfer starts
    TOTAL_TIME = c.getinfo(c.TOTAL_TIME) # total transfer time
    HTTP_CODE = c.getinfo(c.HTTP_CODE) # HTTP status code
    SIZE_DOWNLOAD = c.getinfo(c.SIZE_DOWNLOAD) # size of the downloaded payload
    HEADER_SIZE = c.getinfo(c.HEADER_SIZE) # size of the HTTP headers
    SPEED_DOWNLOAD=c.getinfo(c.SPEED_DOWNLOAD) # average download speed
    # Print the collected metrics
    print "HTTP status code: %s" %(HTTP_CODE)
    print "DNS resolution time: %.2f ms"%(NAMELOOKUP_TIME*1000)
    print "Connect time: %.2f ms" %(CONNECT_TIME*1000)
    print "Pre-transfer time: %.2f ms" %(PRETRANSFER_TIME*1000)
    print "Start-transfer time: %.2f ms" %(STARTTRANSFER_TIME*1000)
    print "Total time: %.2f ms" %(TOTAL_TIME*1000)
    print "Download size: %d bytes" %(SIZE_DOWNLOAD)
    print "Header size: %d bytes" %(HEADER_SIZE)
    print "Average download speed: %d bytes/s" %(SPEED_DOWNLOAD)
    # Close the output file and the Curl object
indexfile.close()
c.close()
if __name__ == "__main__":
url = sys.argv[1]
main(url)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6c3af71fda06f5c2cb5ea1d4adafaa681165074d
|
85193841489787aafe55783e3a866b6a128ae3b6
|
/Cau02.py
|
ac950da2d25a56eb2ac78aa4f6933bd315ae9065
|
[] |
no_license
|
tungrg/KTDL-1
|
8126934b82a662986e0be738c3c55f9475ca125b
|
72b4779d69202d2141c6e6a2b88163212639ffe7
|
refs/heads/main
| 2023-03-02T04:04:49.980565
| 2021-02-01T14:20:36
| 2021-02-01T14:20:36
| 334,970,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
def cau02(listRow):
    count=0
    # Iterate over each row
    for row in listRow:
        # Check every column value in that row
        for key in listRow[0].keys():
            if row[key] == '':
                count += 1
                break
    # Print the number of rows with missing data
    print(count)
    return count
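# A small usage sketch (hypothetical data):
# rows = [{"a": "1", "b": ""}, {"a": "2", "b": "3"}]
# cau02(rows) # prints 1: only the first row has an empty value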
|
[
"noreply@github.com"
] |
noreply@github.com
|
73797439d36e04dea271e61b61aa8620a1227750
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/CDK2_input/L26/26-1S_wat_20Abox/set_1.py
|
36b3bd309b7e3c60deb656a873098ec88d7a6bb5
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/CDK2/L26/wat_20Abox/ti_one-step/26_1S/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_1.in'
temp_pbs = filesdir + 'temp_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_1.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
84c2d1b9d4d9b14db0bd5e93aeac841a4e9ea9b0
|
ddd3b6663fbcc5b64fe9a96a3da87dd1460e1ab4
|
/src/routes/user.py
|
8908f3e57f79e7b83ad52961a8451953a51f62fe
|
[] |
no_license
|
ranihorev/scihive-backend
|
3d72e35829d97368a331bc85c362c7af29b63eb9
|
d246a8ed07b0fd793a1a9c3497c976cbd4957b3d
|
refs/heads/master
| 2022-06-17T17:32:35.834425
| 2021-04-02T14:40:07
| 2021-04-02T14:40:07
| 184,781,038
| 13
| 4
| null | 2022-05-25T03:51:56
| 2019-05-03T15:41:16
|
Python
|
UTF-8
|
Python
| false
| false
| 5,862
|
py
|
import os
from flask import Blueprint, jsonify
import logging
from flask_jwt_extended.view_decorators import jwt_optional
from flask_restful import Api, Resource, abort, reqparse, marshal_with, fields
from flask_jwt_extended import (create_access_token, jwt_required, jwt_refresh_token_required,
get_jwt_identity, get_raw_jwt, set_access_cookies, unset_access_cookies)
from google.oauth2 import id_token
from google.auth.transport import requests
from ..models import User, db, RevokedToken, Paper
from .user_utils import generate_hash, get_jwt_email, get_user_optional, verify_hash, get_user_by_email
from .notifications.index import deserialize_token
app = Blueprint('user', __name__)
api = Api(app)
logger = logging.getLogger(__name__)
parser = reqparse.RequestParser()
parser.add_argument('email', help='This field cannot be blank', required=True)
parser.add_argument('password', help='This field cannot be blank', required=True)
parser.add_argument('username', required=False)
# Based on https://github.com/oleg-agapov/flask-jwt-auth/
def make_error(status_code, message):
    response = jsonify({'message': message})
    response.status_code = status_code
    return response
class UserRegistration(Resource):
def post(self):
abort(404, message='Password registration has been removed')
def get_user_profile(user: User):
return {'username': user.username, 'firstName': user.first_name,
'lastName': user.last_name, 'email': user.email, 'provider': user.provider}
class UserLogin(Resource):
def post(self):
data = parser.parse_args()
current_user = get_user_by_email(data['email'])
if not current_user:
abort(401, message='User {} doesn\'t exist'.format(data['email']))
elif current_user.pending:
abort(403, message='User is pending. Please log in via Google')
elif current_user.provider:
abort(403, message='For security reasons, please log in via Google')
if verify_hash(data['password'], current_user.password):
access_token = create_access_token(identity=dict(email=data['email']))
resp = jsonify(get_user_profile(current_user))
set_access_cookies(resp, access_token)
return resp
else:
return abort(401, message="Wrong credentials")
class UserLogoutAccess(Resource):
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
try:
db.session.add(RevokedToken(token=jti))
db.session.commit()
resp = jsonify({'message': 'Access token has been revoked'})
unset_access_cookies(resp)
return resp
except:
return {'message': 'Something went wrong'}, 500
class TokenRefresh(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
return {'access_token': access_token}
class ValidateUser(Resource):
@jwt_optional
def get(self):
user = get_user_optional()
if user:
return get_user_profile(user)
return None
class Unsubscribe(Resource):
@marshal_with({'title': fields.String})
def post(self, token):
try:
email, paper_id = deserialize_token(token)
user = get_user_by_email(email)
# Verify paper exists
paper = Paper.query.get_or_404(paper_id)
except Exception as e:
abort(404, message='invalid token')
return
user.unsubscribed_papers.append(paper)
db.session.commit()
return paper
class GoogleLogin(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('token', help='This field cannot be blank', required=True, location='json')
data = parser.parse_args()
try:
info = id_token.verify_oauth2_token(data['token'], requests.Request(), os.environ.get('GOOGLE_CLIENT_ID'))
except ValueError as e:
print(e)
abort(403, message='invalid token')
email = info['email']
current_user_email = get_jwt_email()
if current_user_email and current_user_email != email:
# TODO: Allow linking non-matching email addresses
abort(403, message='Your Google email address does not match your existing user')
        # create the user if one does not already exist
user = User.query.filter_by(email=email).first()
first_name: str = info.get('given_name')
last_name: str = info.get('family_name')
if not user:
username = '_'.join(filter(None, [first_name, last_name])) or email.split('@')[0]
            username = username.replace(' ', '_')
new_user = User(username=username,
email=email, password='', first_name=first_name, last_name=last_name, provider='Google')
db.session.add(new_user)
db.session.commit()
elif not user.provider:
user.first_name = first_name
user.last_name = last_name
user.provider = 'Google'
user.pending = False
db.session.commit()
access_token = create_access_token(
identity={'email': email, 'provider': 'Google', 'first_name': first_name, 'last_name': last_name})
resp = jsonify({'message': 'User was created/merged'})
set_access_cookies(resp, access_token)
return resp
api.add_resource(GoogleLogin, '/google_login')
api.add_resource(UserRegistration, '/register')
api.add_resource(UserLogin, '/login')
api.add_resource(UserLogoutAccess, '/logout')
api.add_resource(TokenRefresh, '/token/refresh')
api.add_resource(ValidateUser, '/validate')
api.add_resource(Unsubscribe, '/unsubscribe/<token>')
|
[
"ranihorev@gmail.com"
] |
ranihorev@gmail.com
|
57bd10633782fc7bc3c7aa326762932cdf3dc8dc
|
fa699b7dc0cb1236dfa956ec0c9220c5b8f83b44
|
/magichue/__init__.py
|
c34a7b89c7384bf81c3865aaa4958f274dc6ff60
|
[
"MIT"
] |
permissive
|
lexxai/python-magichue
|
b3703ccda4a568ec84d053a541f5bc40fcf91d25
|
1507bea6844ae420185c25266a870cec69af5de0
|
refs/heads/master
| 2022-12-26T08:40:41.919257
| 2020-09-18T19:04:33
| 2020-09-18T19:04:33
| 296,468,794
| 1
| 0
|
MIT
| 2020-09-18T00:01:55
| 2020-09-18T00:01:54
| null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
from .magichue import Light
from .modes import *
from .discover import discover_bulbs
__author__ = 'namacha'
__version__ = '0.2.9.1'
__license__ = 'MIT'
|
[
"mac.ayu15@gmail.com"
] |
mac.ayu15@gmail.com
|
0a933a99c1a2dafa99ec736f772b3f1e4bfbdf36
|
02cf6e548d88d36d57a719dea6cb4dde2e82bac1
|
/test_zensar/views.py
|
0094e0cfaed8500e21c3d148970984d2488dc98b
|
[] |
no_license
|
smurf-U/assignment_zensar
|
49942d40306a3825787050346cab0f325706dc29
|
d3c9254e6623f7a1fad5a55958cf286cd5a82124
|
refs/heads/master
| 2022-11-10T15:35:04.481569
| 2020-06-27T13:17:28
| 2020-06-27T13:17:28
| 275,371,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,634
|
py
|
import json
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import ListView, DetailView
from rest_framework import viewsets
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework import status
from .serializer import RouterManagerSerializer
from .models import RouterManager
def search_router(request, **kwarg):
data = json.loads(request.body)
if data.get('hostname'):
router_data = RouterManager.objects.filter(hostname__contains=data.get('hostname'))
    elif data.get('f_ipv4') and data.get('t_ipv4'):
        # Assumption: f_ipv4/t_ipv4 bound a range on the model's loopback IP field
        router_data = RouterManager.objects.filter(loopback__range=(data.get('f_ipv4'), data.get('t_ipv4')))
else:
router_data = RouterManager.objects.all()
return JsonResponse([{
'id': router.id,
'sapid': router.sapid,
'hostname': router.hostname,
'loopback': router.loopback,
'mac_address': router.mac_address,
} for router in router_data], safe=False)
class RouterManagerList(ListView):
model = RouterManager
template_name = 'router/list.html'
context_object_name = 'routers'
class RouterManagerDetail(DetailView):
model = RouterManager
template_name = 'router/detail.html'
context_object_name = 'router'
class RouterManagerCreate(CreateView):
model = RouterManager
template_name = 'router/create.html'
fields = "__all__"
success_url = reverse_lazy('routermanager_list')
class RouterManagerUpdate(UpdateView):
model = RouterManager
template_name = 'router/update.html'
context_object_name = 'router'
fields = "__all__"
def get_success_url(self):
return reverse_lazy('routermanager_detail', kwargs={'pk': self.object.id})
class RouterManagerDelete(DeleteView):
model = RouterManager
template_name = 'router/delete.html'
success_url = reverse_lazy('routermanager_list')
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def delete_router_based_on_ip(request):
try:
ip = request.query_params.get('ip', None)
router = RouterManager.objects.get(loopback=ip)
except RouterManager.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
router.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
@permission_classes([IsAuthenticated])
def router_list(request):
"""
    List all routers, or create a new router.
"""
if request.method == 'GET':
sapid = request.query_params.get('sapid', None)
if sapid is not None:
router = RouterManager.objects.filter(sapid__contains=sapid)
else:
router = RouterManager.objects.all()
serializer = RouterManagerSerializer(router, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = RouterManagerSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
@permission_classes([IsAuthenticated])
def router_detail(request, pk):
"""
    Retrieve, update or delete a router.
"""
try:
router = RouterManager.objects.get(pk=pk)
except RouterManager.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = RouterManagerSerializer(router)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = RouterManagerSerializer(router, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
router.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
def api_list(request):
return Response({
'router_list_create': 'api/router/',
'router_detail_update_delete': 'api/router/{int:id}',
'router_delete_base_on_ip': 'api/router/delete_base_on_ip?ip={str:ip}',
},status=status.HTTP_200_OK)
|
[
"kbprajapati@live.com"
] |
kbprajapati@live.com
|
96f934eb181676ae4dd477fc987b8771ea85bc01
|
eb3fb1997a8e5028f35dabce69ca4f7664bd1629
|
/section06-SSH-Pexpect/pexpect-show-ver-telnet-v2.py
|
6f7e589a2872e3b391de174144041223cad33254
|
[] |
no_license
|
bit-git/docker_devcor
|
ccefcd2d25dd17cfdee97f8cdf4167db726f2592
|
b98c825001da97abb0b7eed00f20dab692a1688d
|
refs/heads/main
| 2023-01-29T04:46:32.586649
| 2020-12-11T22:58:01
| 2020-12-11T22:58:01
| 320,686,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,732
|
py
|
#!/usr/bin/python
import re
import pexpect
#-----------------------------------------------------------------------
def get_devices_list():
devices_list = []
file = open('devices', 'r')
for line in file:
devices_list.append( line.rstrip() )
file.close()
print('Devices list:', devices_list)
return devices_list
#-----------------------------------------------------------------------
def connect(ip_address, username, password):
print('Establishing telnet session:', ip_address, username, password)
telnet_command = 'telnet ' + ip_address
# Connect via telnet to device
session = pexpect.spawn(telnet_command, timeout=20)
result = session.expect(['Username:', pexpect.TIMEOUT])
# Check for error, if so then print error and exit
if result != 0:
print('!!! TELNET failed creating session for: ', ip_address)
exit()
# Enter the username, expect password prompt afterwards
session.sendline(username)
result = session.expect(['Password:', pexpect.TIMEOUT])
# Check for error, if so then print error and exit
if result != 0:
print('!!! Username failed: ', username)
exit()
session.sendline(password)
result = session.expect(['>', pexpect.TIMEOUT])
# Check for error, if so then print error and exit
if result != 0:
print('!!! Password failed: ', password)
exit()
print('--- Connected to: ', ip_address)
return session
#-----------------------------------------------------------------------
def get_version_info(session):
print('--- Getting version information')
session.sendline('show version | include Version')
result = session.expect(['>', pexpect.TIMEOUT])
if result != 0:
print('--- Failure getting version')
exit()
# Extract the 'version' part of the output
version_output_lines = session.before.splitlines()
version_output_parts = version_output_lines[1].split(',')
version = version_output_parts[2].strip()
print('--- Got version: ', version)
return version
#-----------------------------------------------------------------------
devices_list = get_devices_list() # Get list of devices
version_file_out = open('version-info-out', 'w')
# Loop through all the devices in the devices list
for ip_address in devices_list:
# Connect to the device via CLI and get version information
session = connect(ip_address, 'cisco', 'cisco')
device_version = get_version_info(session)
session.close() # Close the session
version_file_out.write('IP: '+ ip_address + ' Version: ' + device_version + '\n')
# Done with all devices and writing the file, so close
version_file_out.close()
|
[
"omer.chohan@bt.com"
] |
omer.chohan@bt.com
|
2fb8ff929f56898137184d40c8835d32b18fa869
|
9b67fa4378ed821a2a9f052ef14209a6b01b7c99
|
/server/server/app/views.py
|
b02f37680c3c10afe1390448bd267d4f648d0f62
|
[] |
no_license
|
lifei-cn/demo_oauth2
|
5de52b6bffef2a83be60b59f073354e115793a34
|
b988022ff8158f933250f7247bbd0a15c8a1c123
|
refs/heads/master
| 2021-06-22T19:57:44.326400
| 2019-08-19T09:24:34
| 2019-08-19T09:24:34
| 202,697,039
| 2
| 0
| null | 2021-03-19T22:37:12
| 2019-08-16T09:13:43
|
Python
|
UTF-8
|
Python
| false
| false
| 534
|
py
|
from django.shortcuts import render
from oauth2_provider.decorators import protected_resource
from django.http import HttpResponse
import json
@protected_resource(scopes=['read'])
def profile(request):
return HttpResponse(json.dumps({
"id": request.resource_owner.id,
"username": request.resource_owner.username,
"email": request.resource_owner.email,
"first_name": request.resource_owner.first_name,
"last_name": request.resource_owner.last_name
}), content_type="application/json")
|
[
"lifei@lifeis-MacBook-Pro.local"
] |
lifei@lifeis-MacBook-Pro.local
|
765d28fd3c7162aa08d4563b8ce9836633e54eb7
|
f9299e3d8510d701e2323effd50eb45550318195
|
/watcher.py
|
37d1110085604950e8ebe0940aee33e304bb0447
|
[] |
no_license
|
nomad1072/openwhisk-image-manipulation
|
a742a4246fdf5955c3be77cdb599b29da72b683f
|
720951a58f4fb9392c6eb30e55f9f198fb826d65
|
refs/heads/master
| 2022-12-27T06:28:43.877723
| 2020-03-26T04:39:34
| 2020-03-26T04:39:34
| 243,813,835
| 0
| 0
| null | 2022-12-10T22:59:31
| 2020-02-28T17:05:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,118
|
py
|
import sys
import time
import logging
import requests
import base64
import json
from PIL import Image
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
class Event(FileSystemEventHandler):
def on_created(self, event):
print('Foobar')
print('Event: ', event.src_path, event.event_type)
image = Image.open(event.src_path)
image.show()
# img = base64.encodebytes(image)
# files = {
# "content": "hello",
# "img": image.decode('ascii')
# }
# headers = {
# 'Content-Type': "application/json"
# }
# OW_HOST = "127.0.0.1:3000"
# image_url = "http://" + OW_HOST + "/api/image"
# response = requests.post(image_url, data=json.dumps(files), headers=headers)
# print('Image: ', image)
# OW_HOST = "127.0.0.1"
# image.show()
def dispath(self, event):
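        # NOTE: "dispath" appears to be a misspelling of watchdog's "dispatch",
        # so the Observer never routes events through this method; only
        # on_created above is actually invoked.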
# OW_HOST="172.31.36.188"
# OW_AUTH_USER="789c46b1-71f6-4ed5-8c54-816aa4f8c502"
# OW_AUTH_PASS="abczO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP"
# params = {
# "EXECUTION_TYPE": "asynchronous",
# }
OW_HOST = "127.0.0.1"
print('Foobar')
print('Event: ', event)
# url = "http://" + OW_HOST + "/api/v1/namespaces/_/actions/processImage"
url = "http://" + OW_HOST + "/api/image"
# r = requests.request(url, auth=(OW_AUTH_USER, OW_AUTH_PASS), body=params)
r = requests.post(url)
print('Request: ', r.json())
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = sys.argv[1] if len(sys.argv) > 1 else '.'
event_handler = Event()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
[
"siddharthlanka@gmail.com"
] |
siddharthlanka@gmail.com
|
5f2b6b22643e3f0908eee107a65b989b1f00b1fd
|
1709d783e50c72fe3e0c9937fbb8a2f794d4934a
|
/polls/migrations/0011_auto_20200326_2137.py
|
ba76d08e2dbb676ab511b03a8cc8a0650c57309d
|
[] |
no_license
|
Mateusz1kar/BillTeam
|
8887b467d37baec1be75e646ac1ec72f7a643be6
|
f0738272365326728e3fb30f96d378576797c727
|
refs/heads/master
| 2021-04-05T13:58:07.272291
| 2020-03-26T21:32:50
| 2020-03-26T21:32:50
| 248,563,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Generated by Django 3.0.4 on 2020-03-26 20:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0010_auto_20200326_2137'),
]
operations = [
migrations.RemoveField(
model_name='notification',
name='date_end',
),
migrations.RemoveField(
model_name='notification',
name='date_start',
),
]
|
[
"kar.mateusz@wp.pl"
] |
kar.mateusz@wp.pl
|
cf188fc220a015f1d76281283d7c13e5b1599703
|
d248e44954328edd4fa009a21ec4cf5ae9513c8c
|
/Exercise_1.py
|
7ed6c6adb1273eddccd699200f457c4acc363040
|
[] |
no_license
|
Rakshitha94/PreCourse_1
|
0009c2b382c49066c73c66a3bbb78c64351e9d5d
|
b760179bbdc857f952c421146bbc7c061a6bb7ce
|
refs/heads/master
| 2020-06-25T17:06:42.974708
| 2019-07-29T05:46:26
| 2019-07-29T05:46:26
| 199,373,499
| 0
| 0
| null | 2019-07-29T03:43:52
| 2019-07-29T03:43:52
| null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
def createStack():
stack=[]
return stack
def isEmpty(stack):
return len(stack)==0
def push(stack,item):
stack.append(item)
print(item + " pushed")
def pop(stack):
if(isEmpty(stack)):
return -1
return stack.pop()
def peek(stack):
if isEmpty(stack):
        return -1
return stack[len(stack)-1]
if __name__== "__main__":
stack1 = createStack()
print(isEmpty(stack1))
push(stack1, str(10))
push(stack1, str(20))
print(isEmpty(stack1))
print(peek(stack1)+"item at top")
push(stack1, str(30))
push(stack1, str(200))
print(peek(stack1))
print(pop(stack1) + " popped from stack")
print(pop(stack1) + " popped from stack")
print(pop(stack1) + " popped from stack")
print(pop(stack1) + " popped from stack")
print(isEmpty(stack1))
|
[
"lalitharr@gmail.com"
] |
lalitharr@gmail.com
|
4ce24f32d7afe68264e338a9c35878ac99da8a38
|
fa852eb0102e38b43c1053c31c75570f9bb0445b
|
/problems/minimumDominoRotation.py
|
24bb5809515e52adb401f75b9aa2c31846ebef52
|
[] |
no_license
|
wenyaowu/leetcode-js
|
2380d37dab9a103ba994230e1930d42d2e97bce3
|
377143875d8a0fce924dddcf7167a4312e80070d
|
refs/heads/master
| 2020-08-07T05:15:14.745363
| 2020-04-06T20:12:15
| 2020-04-06T20:12:15
| 213,312,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
"""
In a row of dominoes, A[i] and B[i] represent the top and bottom halves of the i-th domino.
(A domino is a tile with two numbers from 1 to 6 - one on each half of the tile.)
We may rotate the i-th domino, so that A[i] and B[i] swap values.
Return the minimum number of rotations so that all the values in A are the same, or all the values in B are the same.
If it cannot be done, return -1.
Example 1:
Input: A = [2,1,2,4,2,2], B = [5,2,6,2,3,2]
Output: 2
Explanation:
The first figure represents the dominoes as given by A and B: before we do any rotations.
If we rotate the second and fourth dominoes, we can make every value in the top row equal to 2, as indicated by the second figure.
Example 2:
Input: A = [3,5,1,2,3], B = [3,6,3,3,4]
Output: -1
Explanation:
In this case, it is not possible to rotate the dominoes to make one row of values equal.
Note:
1 <= A[i], B[i] <= 6
2 <= A.length == B.length <= 20000
"""
from typing import List
class Solution:
def minDominoRotations(self, A: List[int], B: List[int]) -> int:
n = len(A)
aCount = [0 for i in range(7)]
bCount = [0 for i in range(7)]
same = [ 0 for i in range(7)]
for i in range(n):
aCount[A[i]] +=1
bCount[B[i]] +=1
if A[i] == B[i]:
same[A[i]] +=1
for i in range(1, 7):
if((aCount[i] + bCount[i] - same[i])==n):
return n-max(aCount[i], bCount[i])
return -1
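# Worked check against Example 1 in the docstring: for A = [2,1,2,4,2,2], B = [5,2,6,2,3,2],
# value 2 gives aCount[2] + bCount[2] - same[2] == 4 + 3 - 1 == 6 == n,
# so the method returns n - max(aCount[2], bCount[2]) = 6 - 4 = 2.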
|
[
"wen-yao.wu@cigna.com"
] |
wen-yao.wu@cigna.com
|
1d26faee52aebb1a4aeb52fc612b72efa2b1732a
|
91e91cd6cbca801065480e4e11178a30294cb2a2
|
/plan/migrations/0002_auto_20171026_1636.py
|
62978a6834035d759bc30be943f6c111206b0804
|
[] |
no_license
|
Slavvok/conference-app
|
3a501b03da853fd4b21802939a5bda9434f22894
|
cd20a338ee33222453fadec4e3a9ce800d6e2662
|
refs/heads/master
| 2021-08-14T14:30:27.653684
| 2017-11-16T01:14:02
| 2017-11-16T01:14:02
| 110,745,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-26 13:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plan', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='presentation',
options={},
),
migrations.RenameField(
model_name='presentation',
old_name='essay',
new_name='description',
),
migrations.AddField(
model_name='presentation',
name='upload',
field=models.FileField(null=True, upload_to='presentation'),
),
]
|
[
"slavvok@gmail.com"
] |
slavvok@gmail.com
|
5af6b7fe044a6e854da42d9b35f8adab2005fab2
|
8e138461e9ed8e36245965e215685ce978742535
|
/qiskit/transpiler/passes/utils/gate_direction.py
|
90e66821a268e9c5a5fce4ec630584ef532d26a3
|
[
"Apache-2.0"
] |
permissive
|
faraimazh/qiskit-terra
|
15d8c378114ee109f7b757a7d3795b4c9079c0a8
|
11c2e3ed89452cb6487db784c17c68a8a6284a57
|
refs/heads/master
| 2023-03-16T11:31:27.071954
| 2022-09-27T00:33:02
| 2022-09-27T00:33:02
| 220,650,207
| 0
| 0
|
Apache-2.0
| 2023-03-06T18:13:26
| 2019-11-09T13:59:40
|
Python
|
UTF-8
|
Python
| false
| false
| 8,988
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Rearrange the direction of the cx nodes to match the directed coupling map."""
from math import pi
from qiskit.transpiler.layout import Layout
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.circuit import QuantumRegister
from qiskit.dagcircuit import DAGCircuit
from qiskit.circuit.library.standard_gates import RYGate, HGate, CXGate, ECRGate, RZXGate
class GateDirection(TransformationPass):
"""Modify asymmetric gates to match the hardware coupling direction.
This pass makes use of the following identities::
┌───┐┌───┐┌───┐
q_0: ──■── q_0: ┤ H ├┤ X ├┤ H ├
┌─┴─┐ = ├───┤└─┬─┘├───┤
q_1: ┤ X ├ q_1: ┤ H ├──■──┤ H ├
└───┘ └───┘ └───┘
┌──────┐ ┌───────────┐┌──────┐┌───┐
q_0: ┤0 ├ q_0: ┤ RY(-pi/2) ├┤1 ├┤ H ├
│ ECR │ = └┬──────────┤│ ECR │├───┤
q_1: ┤1 ├ q_1: ─┤ RY(pi/2) ├┤0 ├┤ H ├
└──────┘ └──────────┘└──────┘└───┘
┌──────┐ ┌───┐┌──────┐┌───┐
q_0: ┤0 ├ q_0: ┤ H ├┤1 ├┤ H ├
│ RZX │ = ├───┤│ RZX │├───┤
q_1: ┤1 ├ q_1: ┤ H ├┤0 ├┤ H ├
└──────┘ └───┘└──────┘└───┘
"""
def __init__(self, coupling_map, target=None):
"""GateDirection pass.
Args:
coupling_map (CouplingMap): Directed graph represented a coupling map.
target (Target): The backend target to use for this pass. If this is specified
it will be used instead of the coupling map
"""
super().__init__()
self.coupling_map = coupling_map
self.target = target
# Create the replacement dag and associated register.
self._cx_dag = DAGCircuit()
qr = QuantumRegister(2)
self._cx_dag.add_qreg(qr)
self._cx_dag.apply_operation_back(HGate(), [qr[0]], [])
self._cx_dag.apply_operation_back(HGate(), [qr[1]], [])
self._cx_dag.apply_operation_back(CXGate(), [qr[1], qr[0]], [])
self._cx_dag.apply_operation_back(HGate(), [qr[0]], [])
self._cx_dag.apply_operation_back(HGate(), [qr[1]], [])
self._ecr_dag = DAGCircuit()
qr = QuantumRegister(2)
self._ecr_dag.add_qreg(qr)
self._ecr_dag.apply_operation_back(RYGate(-pi / 2), [qr[0]], [])
self._ecr_dag.apply_operation_back(RYGate(pi / 2), [qr[1]], [])
self._ecr_dag.apply_operation_back(ECRGate(), [qr[1], qr[0]], [])
self._ecr_dag.apply_operation_back(HGate(), [qr[0]], [])
self._ecr_dag.apply_operation_back(HGate(), [qr[1]], [])
@staticmethod
def _rzx_dag(parameter):
_rzx_dag = DAGCircuit()
qr = QuantumRegister(2)
_rzx_dag.add_qreg(qr)
_rzx_dag.apply_operation_back(HGate(), [qr[0]], [])
_rzx_dag.apply_operation_back(HGate(), [qr[1]], [])
_rzx_dag.apply_operation_back(RZXGate(parameter), [qr[1], qr[0]], [])
_rzx_dag.apply_operation_back(HGate(), [qr[0]], [])
_rzx_dag.apply_operation_back(HGate(), [qr[1]], [])
return _rzx_dag
def run(self, dag):
"""Run the GateDirection pass on `dag`.
Flips the cx nodes to match the directed coupling map. Modifies the
input dag.
Args:
dag (DAGCircuit): DAG to map.
Returns:
DAGCircuit: The rearranged dag for the coupling map
Raises:
TranspilerError: If the circuit cannot be mapped just by flipping the
cx nodes.
"""
trivial_layout = Layout.generate_trivial_layout(*dag.qregs.values())
layout_map = trivial_layout.get_virtual_bits()
if len(dag.qregs) > 1:
raise TranspilerError(
"GateDirection expects a single qreg input DAG,"
"but input DAG had qregs: {}.".format(dag.qregs)
)
if self.target is None:
cmap_edges = set(self.coupling_map.get_edges())
if not cmap_edges:
return dag
self.coupling_map.compute_distance_matrix()
dist_matrix = self.coupling_map.distance_matrix
for node in dag.two_qubit_ops():
control = node.qargs[0]
target = node.qargs[1]
physical_q0 = layout_map[control]
physical_q1 = layout_map[target]
if dist_matrix[physical_q0, physical_q1] != 1:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s" % (physical_q0, physical_q1)
)
if (physical_q0, physical_q1) not in cmap_edges:
if node.name == "cx":
dag.substitute_node_with_dag(node, self._cx_dag)
elif node.name == "ecr":
dag.substitute_node_with_dag(node, self._ecr_dag)
elif node.name == "rzx":
dag.substitute_node_with_dag(node, self._rzx_dag(*node.op.params))
else:
raise TranspilerError(
f"Flipping of gate direction is only supported "
f"for CX, ECR, and RZX at this time, not {node.name}."
)
else:
# TODO: Work with the gate instances and only use names as look up keys.
# This will require iterating over the target names to build a mapping
# of names to gates that implement CXGate, ECRGate, RZXGate (including
# fixed angle variants)
for node in dag.two_qubit_ops():
control = node.qargs[0]
target = node.qargs[1]
physical_q0 = layout_map[control]
physical_q1 = layout_map[target]
if node.name == "cx":
if (physical_q0, physical_q1) in self.target["cx"]:
continue
if (physical_q1, physical_q0) in self.target["cx"]:
dag.substitute_node_with_dag(node, self._cx_dag)
else:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s for cx" % (physical_q0, physical_q1)
)
elif node.name == "ecr":
if (physical_q0, physical_q1) in self.target["ecr"]:
continue
if (physical_q1, physical_q0) in self.target["ecr"]:
dag.substitute_node_with_dag(node, self._ecr_dag)
else:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s for ecr" % (physical_q0, physical_q1)
)
elif node.name == "rzx":
if (physical_q0, physical_q1) in self.target["rzx"]:
continue
if (physical_q1, physical_q0) in self.target["rzx"]:
dag.substitute_node_with_dag(node, self._rzx_dag(*node.op.params))
else:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s for rzx" % (physical_q0, physical_q1)
)
else:
raise TranspilerError(
f"Flipping of gate direction is only supported "
f"for CX, ECR, and RZX at this time, not {node.name}."
)
return dag
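# A minimal usage sketch, assuming qiskit is installed and the imports this
# module needs (QuantumRegister, DAGCircuit, the gate classes, etc.) appear
# above; it shows a CX flipped to match a one-way coupling map.
if __name__ == "__main__":
    from qiskit import QuantumCircuit
    from qiskit.converters import circuit_to_dag, dag_to_circuit
    from qiskit.transpiler import CouplingMap

    circuit = QuantumCircuit(2)
    circuit.cx(1, 0)                  # goes against the (0 -> 1) edge below
    cmap = CouplingMap([(0, 1)])      # hardware only offers CX from 0 to 1
    fixed_dag = GateDirection(cmap).run(circuit_to_dag(circuit))
    print(dag_to_circuit(fixed_dag))  # CX(0, 1) sandwiched between H gates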
|
[
"noreply@github.com"
] |
noreply@github.com
|
25dbb752fd770ca7129476e4ebc8d3a1657c5c7c
|
5654998d8a024b2613fc6bbf709875e428914f81
|
/src/utils/python/arc/control/AccountingDBSQLite.py
|
3d2ba71d5e59ea83704439a5260a63ea98efb14e
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
davidgcameron/arc
|
96f4188017e8b6ce71c099fb348a09ba037c1dbb
|
9813ef5f45e5089507953239de8fa2248f5ad32c
|
refs/heads/master
| 2021-10-23T22:04:24.290988
| 2019-03-20T07:16:50
| 2019-03-20T07:16:50
| 176,705,313
| 0
| 0
|
Apache-2.0
| 2019-03-20T10:03:27
| 2019-03-20T10:03:27
| null |
UTF-8
|
Python
| false
| false
| 11,201
|
py
|
import sys
import logging
import datetime
import sqlite3
from arc.paths import *
class AccountingDBSQLite(object):
"""SQLite implementation of ARC accounting archive database"""
__dbinit_sql_script = ARC_DATA_DIR + '/accounting_sqlite_db_init.sql'
def __init__(self, db_file):
self.logger = logging.getLogger('ARC.JuraArchive.SQLiteDB')
self.con = None
try:
self.con = sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES)
except sqlite3.Error as e:
self.logger.error('Failed to initialize SQLite connection. Error %s', str(e))
sys.exit(1)
# try to get database version
db_version = self.get_db_version()
if db_version is None:
try:
self.logger.info('Initializing archive database structure.')
with open(self.__dbinit_sql_script) as sql_f:
self.con.executescript(sql_f.read())
except sqlite3.Error as e:
self.logger.error('Failed to initialize SQLite database structure. Error %s', str(e))
self.con.close()
sys.exit(1)
else:
            # In case we need to adjust the database schema, altering SQL will be applied here depending on the version
self.logger.debug('Loaded archive database version %s at %s', db_version, db_file)
# init vars for records filtering methods
self.filter_str = ''
self.filter_params = ()
self.return_empty_select = False
def close(self):
"""Terminate database connection"""
if self.con is not None:
self.con.close()
self.con = None
def __del__(self):
self.close()
def _get_value(self, sql, params=(), errstr='value'):
"""General helper to get the one value from database"""
try:
for row in self.con.execute(sql, params):
return row[0]
except sqlite3.Error as e:
if params:
errstr = errstr.format(*params)
self.logger.debug('Failed to get %s. Error: %s', errstr, str(e))
return None
def __get_kv_table(self, table, value, key, startswith=''):
"""General helper to fetch key=value tables content as a dict"""
data = {}
try:
if startswith:
res = self.con.execute(
"SELECT {0},{1} FROM {2} WHERE {0} LIKE ?".format(value, key, table), (startswith + '%',))
else:
res = self.con.execute("SELECT {0},{1} FROM {2}".format(value, key, table))
# create owners dict
for row in res:
data[row[0]] = row[1]
except sqlite3.Error as e:
self.logger.error('Failed to get %s table data from accounting database. Error: %s', table, str(e))
return data
def __insert_kv_data(self, table, column, value):
"""General helper to insert key=value content to the database"""
try:
cursor = self.con.cursor()
cursor.execute("INSERT INTO {0} ({1}) VALUES (?)".format(table, column), (value,))
except sqlite3.Error as e:
self.logger.error('Failed to insert "%s" into accounting database %s table. Error: %s',
value, table, str(e))
return None
else:
self.con.commit()
return cursor.lastrowid
def get_db_version(self):
"""Get accounting databse schema version"""
return self._get_value("SELECT VarValue FROM Vars WHERE VarName = 'VERSION'", errstr='archive database version')
def get_owners(self, startswith=''):
"""Get job owners dictionary"""
return self.__get_kv_table('Owners', 'OwnerDN', 'OwnerID', startswith)
def get_vos(self, startswith=''):
"""Get job VOs dictionary"""
return self.__get_kv_table('VOs', 'VOName', 'VOId', startswith)
def get_ownerid(self, dn):
"""Get job owner ID by DN"""
return self._get_value("SELECT OwnerId FROM Owners WHERE OwnerDN = ?", (dn,), errstr='user {0} database ID')
def get_void(self, voname):
"""Get job VO ID by VO name"""
return self._get_value("SELECT VOId FROM VOs WHERE VOName = ?", (voname,), errstr='VO {0} database ID')
def add_vo(self, name):
"""Add new job VO record"""
return self.__insert_kv_data('VOs', 'VOName', name)
def add_owner(self, dn):
"""Add new job owner record"""
return self.__insert_kv_data('Owners', 'OwnerDN', dn)
def add_usagerecord(self, ur):
"""Add new accounting record"""
try:
cur = self.con.cursor()
cur.execute("INSERT OR IGNORE INTO UsageRecords VALUES (?,?,?,?,?,?,?,?,?,?,?)",
(ur['RecordId'], ur['RecordType'], ur['StartTime'], ur['EndTime'],
ur['WallTime'], ur['CpuTime'], ur['Processors'],
ur['JobName'], ur['JobID'], ur['Owner'], ur['OwnerVO']))
except sqlite3.Error as e:
self.logger.error('Failed to insert "%s" record into accounting database. Error: %s',
ur['RecordId'], str(e))
return False
else:
self.con.commit()
if not cur.rowcount:
                self.logger.warning('Record "%s" already exists in accounting database (insert ignored).',
ur['RecordId'])
return True
def filters_clear(self):
"""Clear all filters"""
self.filter_str = ''
self.filter_params = ()
def filter_type(self, typeid):
"""Add record type filtering to the select queries"""
self.filter_str += 'AND RecordType = ? '
self.filter_params += (typeid,)
def filter_vos(self, vonames):
"""Add VO filtering to the select queries"""
vos = self.get_vos()
voids = []
for vo in vonames:
if vo not in vos:
self.logger.error('There are no records with %s VO in the database.', vo)
else:
voids.append(vos[vo])
if not voids:
self.return_empty_select = True
else:
self.filter_str += 'AND VOId IN({0}) '.format(','.join(['?'] * len(voids)))
self.filter_params += tuple(voids)
def filter_owners(self, dns):
"""Add job owner DN filtering to the select queries"""
owners = self.get_owners()
ownerids = []
for dn in dns:
if dn not in owners:
self.logger.error('There are no records with %s job owner in the database.', dn)
else:
ownerids.append(owners[dn])
if not ownerids:
self.return_empty_select = True
else:
self.filter_str += 'AND OwnerId IN({0}) '.format(','.join(['?'] * len(ownerids)))
self.filter_params += tuple(ownerids)
def filter_startfrom(self, stime):
"""Add job start time filtering to the select queries"""
self.filter_str += 'AND StartTime > ? '
self.filter_params += (stime,)
def filter_endtill(self, etime):
"""Add job end time filtering to the select queries"""
self.filter_str += 'AND EndTime < ? '
self.filter_params += (etime,)
def _filtered_query(self, sql, params=(), errorstr=''):
"""Add defined filters to SQL query and execute it returning the results iterator"""
if self.return_empty_select:
return []
if self.filter_str:
if 'WHERE' in sql:
sql += ' ' + self.filter_str
else:
sql += ' WHERE' + self.filter_str[3:]
params += self.filter_params
try:
res = self.con.execute(sql, params)
return res
except sqlite3.Error as e:
params += (str(e),)
self.logger.debug('Failed to execute query: {0}. Error: %s'.format(sql.replace('?', '%s')), *params)
if errorstr:
                self.logger.error(errorstr + ' Something went wrong during the SQL query. '
'Use DEBUG loglevel to troubleshoot.')
return []
def get_records_path_data(self):
"""Return records IDs and EndTime (necessary to find the file path)"""
data = []
for res in self._filtered_query("SELECT RecordId, EndTime FROM UsageRecords",
errorstr='Failed to get accounting records.'):
data.append((res[0], res[1]))
return data
def get_records_count(self):
"""Return records count"""
for res in self._filtered_query("SELECT COUNT(*) FROM UsageRecords", errorstr='Failed to get records count.'):
return res[0]
return 0
def get_records_walltime(self):
"""Return total records walltime"""
wallt = datetime.timedelta(0)
for res in self._filtered_query("SELECT WallTime FROM UsageRecords", errorstr='Failed to get walltime values'):
wallt += datetime.timedelta(seconds=res[0])
return wallt
def get_records_cputime(self):
"""Return total records cputime"""
cput = datetime.timedelta(0)
for res in self._filtered_query("SELECT CpuTime FROM UsageRecords", errorstr='Failed to get cputime values'):
cput += datetime.timedelta(seconds=res[0])
return cput
def get_records_owners(self):
"""Return list of owners for selected records"""
owners = self.get_owners()
ids = []
for res in self._filtered_query("SELECT DISTINCT OwnerId FROM UsageRecords",
errorstr='Failed to get job owners'):
ids.append(res[0])
return [dn for dn in owners.keys() if owners[dn] in ids]
def get_records_vos(self):
"""Return list of VOs for selected records"""
vos = self.get_vos()
ids = []
for res in self._filtered_query("SELECT DISTINCT VOId FROM UsageRecords",
errorstr='Failed to get job VOs'):
ids.append(res[0])
return [v for v in vos.keys() if vos[v] in ids]
def get_records_dates(self):
"""Return startdate and enddate interval for selected records"""
mindate = None
for res in self._filtered_query("SELECT MIN(StartTime) FROM UsageRecords",
errorstr='Failed to get minimum records start date'):
mindate = res[0]
maxdate = None
for res in self._filtered_query("SELECT MAX(EndTime) FROM UsageRecords",
                                        errorstr='Failed to get maximum records end date'):
maxdate = res[0]
return mindate, maxdate
def delete_records(self):
"""Remove records from database"""
if not self.filter_str:
self.logger.error('Removing records without applying filters is not allowed')
return False
self._filtered_query("DELETE FROM UsageRecords")
self.con.commit()
return True
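# A minimal usage sketch, assuming an ARC installation so that the schema
# init script referenced above exists; the database path is hypothetical.
if __name__ == '__main__':
    db = AccountingDBSQLite('/tmp/accounting.db')
    db.filter_startfrom(datetime.datetime(2019, 1, 1))
    print('records: %s' % db.get_records_count())
    print('walltime: %s' % db.get_records_walltime())
    db.close()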
|
[
"manf@grid.org.ua"
] |
manf@grid.org.ua
|
49b4e0b91d57155a69ce4080265a0ee06dd8bf3c
|
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
|
/venv/lib/python3.9/site-packages/webdriver_manager/archive.py
|
f827dc3151deda496a84de6fc9aa5809d377ab0e
|
[
"MIT"
] |
permissive
|
davidycliao/bisCrawler
|
729db002afe10ae405306b9eed45b782e68eace8
|
f42281f35b866b52e5860b6a062790ae8147a4a4
|
refs/heads/main
| 2023-05-24T00:41:50.224279
| 2023-01-22T23:17:51
| 2023-01-22T23:17:51
| 411,470,732
| 8
| 0
|
MIT
| 2023-02-09T16:28:24
| 2021-09-28T23:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
import tarfile
import zipfile
class Archive(object):
def __init__(self, path: str):
self.file_path = path
def unpack(self, directory):
if self.file_path.endswith(".zip"):
return self.__extract_zip(directory)
elif self.file_path.endswith(".tar.gz"):
return self.__extract_tar_file(directory)
def __extract_zip(self, to_directory):
archive = zipfile.ZipFile(self.file_path)
try:
archive.extractall(to_directory)
except Exception as e:
if e.args[0] not in [26, 13] and e.args[1] not in ['Text file busy', 'Permission denied']:
raise e
return archive.namelist()
def __extract_tar_file(self, to_directory):
try:
tar = tarfile.open(self.file_path, mode="r:gz")
except tarfile.ReadError:
tar = tarfile.open(self.file_path, mode="r:bz2")
members = tar.getmembers()
tar.extractall(to_directory)
tar.close()
return [x.name for x in members]
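# A minimal usage sketch; the archive and target paths are hypothetical.
# unpack() dispatches on the file extension, so both .zip and .tar.gz work.
if __name__ == "__main__":
    archive = Archive("/tmp/chromedriver_linux64.zip")
    members = archive.unpack("/tmp/drivers")
    print(members)  # names of the extracted files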
|
[
"davidycliao@gmail.com"
] |
davidycliao@gmail.com
|
e3f55c63e9336019e278147e60049af38ee6af12
|
10cfa8981dfe98492a057e072be853fb046f8fe2
|
/DS-Unit-3-Sprint-1-Software-Engineering-master/SC/acme_report.py
|
69f709148d540e8d6f2d4b77524cc89eb07a678b
|
[
"MIT"
] |
permissive
|
nvisagan/DE_LDS3
|
88c52a56ede9dbc2d8f94a08688d32134a8584e0
|
823f4834fb6fa70ed0be30680d7ca809b7045bb9
|
refs/heads/master
| 2022-12-13T03:40:54.892079
| 2019-11-18T04:25:22
| 2019-11-18T04:25:22
| 222,361,335
| 0
| 0
|
MIT
| 2021-04-30T21:45:20
| 2019-11-18T04:08:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
from random import randint, sample, uniform
from acme import Product
# For random sample to generate names
ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']
NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']
def generate_products(num_products=30):
""" Generate Acme Products """
products = []
for _ in range(num_products):
adj = ADJECTIVES[randint(0,4)]
noun = NOUNS[randint(0,4)]
name = f'{adj} {noun}'
price = randint(5, 100)
weight = randint(5, 100)
flammability = uniform(0.0, 2.5)
products.append(Product(name, price, weight, flammability))
return products
def inventory_report(products):
""" Report of the acme products """
num_unique_prod_name = []
total_price = 0
total_weight = 0
total_flammability = 0
for product in products:
if product.name not in num_unique_prod_name:
num_unique_prod_name.append(product.name)
total_price += product.price
total_weight += product.weight
total_flammability += product.flammability
    total = len(products)  # average over all products, not just unique names
avg_price = total_price / total
avg_weight = total_weight / total
avg_flammability = total_flammability / total
print('\nACME CORPORATION OFFICIAL INVENTORY REPORT')
print(f'Unique product names: {len(num_unique_prod_name)}')
print(f'Average Price: {avg_price:.1f}')
print(f'Average Weight: {avg_weight:.1f}')
print(f'Average Flammability: {avg_flammability:.1f}')
if __name__ == '__main__':
inventory_report(generate_products())
|
[
"noreply@github.com"
] |
noreply@github.com
|
57bed8874c41622396c68ed6ce94e7487cf30571
|
1777f5e6f3129e5e2df75161f669eb0531355a0b
|
/myweb/mysite/mysite/settings.py
|
7006d090af6b3805289d063a436b8c28abe3339e
|
[] |
no_license
|
HakimdarC/CRUD-project-Django-Django
|
ca52b3420a3e25fcebea7f855102a9e306dcbb19
|
4fe51989e1be7940331ddb89ccc7992a6a49559a
|
refs/heads/master
| 2022-10-08T04:01:41.530990
| 2019-07-26T12:04:02
| 2019-07-26T12:04:02
| 195,555,184
| 0
| 1
| null | 2022-10-03T14:09:50
| 2019-07-06T15:33:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,250
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fwb46z3z92q_(2$rpx-tdtjyk&p(e_xqpvno&$kuymcqnq)vzv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
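# An illustrative hardening sketch (assumption: the deployment exports these
# variables); the development defaults above are kept when they are unset.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
DEBUG = os.environ.get('DJANGO_DEBUG', '1') == '1'
if os.environ.get('DJANGO_ALLOWED_HOSTS'):
    ALLOWED_HOSTS = os.environ['DJANGO_ALLOWED_HOSTS'].split(',')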
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
|
[
"moseshakim23@gmail.com"
] |
moseshakim23@gmail.com
|
4777634a7108cdcae453309b391c4f10c3645123
|
0348c7aac6a373a8565c017be0c0e1df1b50514d
|
/inventory_plugins/yaml_groups.py
|
4cdeb27bfd740046e65c8b9a533e344907a8f4c0
|
[
"MIT"
] |
permissive
|
igorbelitei/ansible-inventory-yaml-groups
|
35034fd47e706ebafc6dcaceda77ccd582f88b65
|
3060b7140039c6dc8f43ecb5fa34e52d3f7e038c
|
refs/heads/master
| 2021-09-13T05:55:48.516353
| 2018-04-25T18:53:28
| 2018-04-25T18:53:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,046
|
py
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: yaml_groups
    short_description: Uses a specially formatted YAML file as an inventory source.
description:
- Alternative YAML formatted inventory for Ansible.
        - Allows you to assign groups to hosts as well as hosts to groups
- Easily make new groups that are supersets and subsets of other groups.
notes:
- To function it requires being whitelisted in configuration.
options:
yaml_extensions:
description: list of 'valid' extensions for files containing YAML
type: list
default: ['.yaml', '.yml', '.json']
'''
EXAMPLES = '''
---
groups:
app1-prod:
include:
- app1
require:
- prod
app1-dev:
include:
- app1
require:
      - dev
app2-prod:
hosts:
- app2-web1
app2:
include:
- app2-prod
- app2-dev
all-apps:
include:
- app1
- app2
hosts:
web-app1-prod.location1.com:
groups:
- app1
- location1
- prod
- web
db-app1-prod.location1.com:
groups:
- app1
- location1
- prod
- db
app1-dev.location1.com:
vars:
EXAMPLE: "true"
groups:
- app1
- location2
- dev
- web
- db
'''
import os
from collections import MutableMapping, Sequence
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.inventory import BaseFileInventoryPlugin, detect_range, expand_hostname_range
def is_sequence(obj):
    return isinstance(obj, Sequence) and not isinstance(obj, string_types)
def is_dict(obj):
return isinstance(obj, MutableMapping)
def must_be_sequence(obj, name=None):
if not is_sequence(obj):
if name:
raise AnsibleParserError('Invalid "%s" entry, requires a sequence, found "%s" instead.' % (name, type(obj)))
else:
            raise AnsibleParserError('Invalid data, requires a sequence, found "%s" instead.' % type(obj))
return obj
def must_be_dict(obj, name=None):
if not is_dict(obj):
if name:
raise AnsibleParserError('Invalid "%s" entry, requires a dictionary, found "%s" instead.' % (name, type(obj)))
else:
            raise AnsibleParserError('Invalid data, requires a dictionary, found "%s" instead.' % type(obj))
return obj
def must_not_be_plugin(obj):
if 'plugin' in obj:
raise AnsibleParserError('Plugin configuration YAML file, not YAML groups inventory')
if 'all' in obj:
raise AnsibleParserError('Standard configuration YAML file, not YAML groups inventory')
return obj
class InventoryModule(BaseFileInventoryPlugin):
NAME = 'yaml-groups'
def __init__(self):
super(InventoryModule, self).__init__()
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
valid = True
return valid
def parse(self, inventory, loader, path, cache=True):
''' parses the inventory file '''
super(InventoryModule, self).parse(inventory, loader, path)
try:
data = self.loader.load_from_file(path)
except Exception as e:
raise AnsibleParserError(e)
if not data:
raise AnsibleParserError('Parsed empty YAML file')
must_be_dict(data)
must_not_be_plugin(data)
if 'hosts' in data:
self._parse_hosts(data['hosts'])
if 'groups' in data:
self._parse_groups(data['groups'])
def _parse_hosts(self, hosts):
must_be_dict(hosts, name='hosts')
for host_name in hosts:
self._parse_host(host_name, hosts[host_name])
def _parse_host(self, host_pattern, host):
'''
Each host key can be a pattern, try to process it and add variables as needed
'''
must_be_dict(host)
(host_names, port) = self._expand_hostpattern(host_pattern)
all_group = self.inventory.groups['all']
for host_name in host_names:
self.inventory.add_host(host_name, port=port)
all_group.add_host(self.inventory.get_host(host_name))
if 'groups' in host:
self._parse_host_groups(host_names, host['groups'])
if 'vars' in host:
self._parse_host_vars(host_names, host['vars'])
def _populate_host_vars(self, hosts, variables, group=None, port=None):
for host in hosts:
self.inventory.add_host(host, group=group, port=port)
for k in variables:
self.inventory.set_variable(host, k, variables[k])
def _parse_host_vars(self, host_names, host_vars):
must_be_dict(host_vars, name='vars')
self._populate_host_vars(host_names, host_vars)
def _parse_host_groups(self, host_names, host_groups):
must_be_sequence(host_groups, name='groups')
for group_name in host_groups:
self.inventory.add_group(group_name)
for host_name in host_names:
self.inventory.add_child(group_name, host_name)
def _parse_groups(self, groups):
must_be_dict(groups, name='groups')
for group_name in sorted(groups):
self._parse_group(group_name, groups[group_name])
def _parse_group(self, group_name, group_data):
must_be_dict(group_data, name=('groups/%s %s' % (group_name, group_data)))
self.inventory.add_group(group_name)
group = self.inventory.groups[group_name]
all_group = self.inventory.groups['all']
if 'vars' in group_data:
group_vars = must_be_dict(group_data['vars'], name='vars')
for var_name in group_vars:
group.set_variable(var_name, group_vars[var_name])
if 'hosts' in group_data:
host_names = must_be_sequence(group_data['hosts'], name='hosts')
for host_name in host_names:
self.inventory.add_host(host_name)
group.add_host(host_name)
all_group.add_host(self.inventory.get_host(host_name))
if 'include' in group_data:
include_names = must_be_sequence(group_data['include'], name='include')
for include_name in include_names:
self._parse_group_include(group, include_name)
if 'require' in group_data:
require_names = must_be_sequence(group_data['require'], name='require')
for require_name in require_names:
self._parse_group_require(group, require_name)
if 'exclude' in group_data:
exclude_names = must_be_sequence(group_data['exclude'], name='exclude')
for exclude_name in exclude_names:
self._parse_group_exclude(group, exclude_name)
def _parse_group_include(self, group, include_name):
if include_name not in self.inventory.groups:
return
include_group = self.inventory.groups[include_name]
for host in include_group.get_hosts():
group.add_host(host)
def _parse_group_require(self, group, require_name):
if require_name not in self.inventory.groups:
            raise AnsibleParserError('Group "%s" requires non-existent group "%s"' % (group.name, require_name))
require_group = self.inventory.groups[require_name]
for host in group.get_hosts():
if host not in require_group.get_hosts():
group.remove_host(host)
def _parse_group_exclude(self, group, exclude_name):
if exclude_name not in self.inventory.groups:
return
exclude_group = self.inventory.groups[exclude_name]
for host in exclude_group.get_hosts():
if host in group.get_hosts():
group.remove_host(host)
def _expand_hostpattern(self, hostpattern):
'''
Takes a single host pattern and returns a list of host_names and an
optional port number that applies to all of them.
'''
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more host_names, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
host_names = expand_hostname_range(pattern)
else:
host_names = [pattern]
return (host_names, port)
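# An illustrative enablement sketch (file names are hypothetical): the plugin
# must be whitelisted in ansible.cfg before inventory files are parsed, e.g.
#
#     [inventory]
#     enable_plugins = yaml_groups, host_list, yaml, ini
#
# and then exercised with: ansible-inventory -i inventory.yml --graph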
|
[
"andrew.phillips2@canada.ca"
] |
andrew.phillips2@canada.ca
|
d3929dc97598aab6a7af1debfbe632157c441bb5
|
0b01cb61a4ae4ae236a354cbfa23064e9057e434
|
/alipay/aop/api/request/AlipayOpenMiniVersionListQueryRequest.py
|
ad88c60fc343c2034182b816202dcf7724a44190
|
[
"Apache-2.0"
] |
permissive
|
hipacloud/alipay-sdk-python-all
|
e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13
|
bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d
|
refs/heads/master
| 2022-11-14T11:12:24.441822
| 2020-07-14T03:12:15
| 2020-07-14T03:12:15
| 277,970,730
| 0
| 0
|
Apache-2.0
| 2020-07-08T02:33:15
| 2020-07-08T02:33:14
| null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniVersionListQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.mini.version.list.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
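# A minimal usage sketch; signing and HTTP transport live elsewhere in the
# SDK, so this only shows how the request parameters are assembled. The
# token value below is hypothetical.
if __name__ == '__main__':
    request = AlipayOpenMiniVersionListQueryRequest()
    request.add_other_text_param('app_auth_token', 'hypothetical-token')
    print(request.get_params())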
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
689e2c12f0bb10f410e74b1445b159f59430d7c5
|
926ad7a188910991ee6a66dc7237af9332128aa4
|
/GAT/models/base_gattn.py
|
222a8e29727e0c7546fdd306d117e5867ac99af9
|
[
"MIT"
] |
permissive
|
YunseobShin/wiki_GAT
|
26c54f810e1f0e5391ab0692b094796376d0a10c
|
ff403bc365d93977277836e9998100b8a5a5a742
|
refs/heads/master
| 2020-04-21T11:22:21.757302
| 2019-02-19T00:08:00
| 2019-02-19T00:08:00
| 169,522,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,492
|
py
|
import tensorflow as tf
class BaseGAttN:
def loss(logits, labels, nb_classes, class_weights):
sample_wts = tf.reduce_sum(tf.multiply(tf.one_hot(labels, nb_classes), class_weights), axis=-1)
xentropy = tf.multiply(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits), sample_wts)
return tf.reduce_mean(xentropy, name='xentropy_mean')
def training(loss, lr, l2_coef):
# weight decay
vars = tf.trainable_variables()
lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars if v.name not
in ['bias', 'gamma', 'b', 'g', 'beta']]) * l2_coef
# optimizer
opt = tf.train.AdamOptimizer(learning_rate=lr)
# training op
train_op = opt.minimize(loss+lossL2)
return train_op
def preshape(logits, labels, nb_classes):
new_sh_lab = [-1]
new_sh_log = [-1, nb_classes]
log_resh = tf.reshape(logits, new_sh_log)
lab_resh = tf.reshape(labels, new_sh_lab)
return log_resh, lab_resh
def confmat(logits, labels):
preds = tf.argmax(logits, axis=1)
return tf.confusion_matrix(labels, preds)
##########################
# Adapted from tkipf/gcn #
##########################
def masked_softmax_cross_entropy(logits, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_sigmoid_cross_entropy(logits, labels, mask):
"""Softmax cross-entropy loss with masking."""
labels = tf.cast(labels, dtype=tf.float32)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
loss=tf.reduce_mean(loss,axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_accuracy(logits, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
accuracy_all *= mask
return tf.reduce_mean(accuracy_all)
def micro_f1(logits, labels, mask):
"""Accuracy with masking."""
predicted = tf.round(tf.nn.sigmoid(logits))
# Use integers to avoid any nasty FP behaviour
predicted = tf.cast(predicted, dtype=tf.int32)
labels = tf.cast(labels, dtype=tf.int32)
mask = tf.cast(mask, dtype=tf.int32)
# expand the mask so that broadcasting works ([nb_nodes, 1])
mask = tf.expand_dims(mask, -1)
# Count true positives, true negatives, false positives and false negatives.
tp = tf.count_nonzero(predicted * labels * mask)
tn = tf.count_nonzero((predicted - 1) * (labels - 1) * mask)
fp = tf.count_nonzero(predicted * (labels - 1) * mask)
fn = tf.count_nonzero((predicted - 1) * labels * mask)
# Calculate accuracy, precision, recall and F1 score.
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fmeasure = (2 * precision * recall) / (precision + recall)
fmeasure = tf.cast(fmeasure, tf.float32)
return fmeasure
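# A minimal usage sketch (TF 1.x graph mode, Python 3; the shapes and the
# number of classes are hypothetical).
if __name__ == '__main__':
    logits = tf.placeholder(tf.float32, [None, 10])
    labels = tf.placeholder(tf.float32, [None, 10])
    mask = tf.placeholder(tf.float32, [None])
    accuracy = BaseGAttN.masked_accuracy(logits, labels, mask)
    f1 = BaseGAttN.micro_f1(logits, labels, mask)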
|
[
"yss@115.145.170.73"
] |
yss@115.145.170.73
|
ff5c3508ad4fd0c5674db769a295de3fe147d1f5
|
c5b155ff1df6ea9e038e101767cf0133e348994e
|
/zuriproject/urls.py
|
ad4962196d1e8f61b988bdb3c23646e588a835ae
|
[] |
no_license
|
Tashy009/Zuri-training
|
1c482660c5100ce32ca1ed88a1558c6e083086c0
|
5308fea0ee0c41b4bfcb0ca0f111f8aa74edd23d
|
refs/heads/master
| 2023-04-19T06:45:32.408971
| 2021-04-20T14:00:38
| 2021-04-20T14:00:38
| 358,369,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
"""zuriproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('firstapp.urls')),
path('admin/', admin.site.urls),
]
|
[
"shittutaofeek009@gmail.com"
] |
shittutaofeek009@gmail.com
|
8fc5feebc189aeacbee5c7a951d963f6cdd0dec0
|
c7580ef245b8e82a3f29afa0de0258d96b42dd9b
|
/apps/webshop/__init__.py
|
94de1e730f6974c3bb08d597f996b2bf77649e03
|
[
"MIT"
] |
permissive
|
Jorrre/onlineweb4
|
083ff1ae2ba8cf457a124d4de9d3b7412dbb1e5a
|
8b7eda112c18accf02fb42542a1d20b67d1a515d
|
refs/heads/main
| 2023-07-09T13:41:57.167319
| 2021-06-07T14:47:53
| 2021-06-07T14:47:53
| 279,148,142
| 0
| 0
|
MIT
| 2020-07-12T21:07:06
| 2020-07-12T21:07:06
| null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
default_app_config = "apps.webshop.appconfig.WebshopConfig"
|
[
"henrik@horluck.no"
] |
henrik@horluck.no
|
e1bbce8655b1596bb2a77c6db900e7a854d70cf5
|
2c16e24486ac92bbd37f5c6d0d00ec4ba4d48e56
|
/ex/ex1.py
|
0d5193b36e4107bb3f5edf45a87b64307424927a
|
[] |
no_license
|
alagram/lpthw
|
386b6cf7534e2f7dba2e5832d6975107f27ceb9b
|
656e7526006de80354917da881cbcbb3dbe8523a
|
refs/heads/master
| 2021-01-10T20:55:35.461722
| 2014-09-16T18:33:50
| 2014-09-16T18:33:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# print "Hello World!"
# print "Hello Again"
# print "I like tying this."
# print "This is fun."
# print "Yay! Printing."
# print "I'd much rather you 'not'."
# print 'I "said" do not touch this.'
print "I am still printing..."
|
[
"albert.agram@gmail.com"
] |
albert.agram@gmail.com
|
d9aecb93dc9206914cef8b2032e80586cb4021f3
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/eqptcapacity/l3totalusagecap1w.py
|
8a9e840a82e552ac433c1581e5975f775e7ff967
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,132
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3TotalUsageCap1w(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.L3TotalUsageCap1w", "Layer3 total entries max capacity")
counter = CounterMeta("v6TotalEpCap", CounterCategory.GAUGE, "count", "Total v6 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v6TotalEpCapLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6TotalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6TotalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6TotalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6TotalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "v6TotalEpCapTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6TotalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v6TotalEpCapTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6TotalEpCapTr"
meta._counters.append(counter)
counter = CounterMeta("v4TotalEpCap", CounterCategory.GAUGE, "count", "Total v4 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v4TotalEpCapLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4TotalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4TotalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4TotalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4TotalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "v4TotalEpCapTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4TotalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v4TotalEpCapTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4TotalEpCapTr"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityL3TotalUsageCap1w"
meta.rnFormat = "CDeqptcapacityL3TotalUsageCap1w"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Layer3 total entries max capacity stats in 1 week"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.eqptcapacity.L3TotalUsageCap")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.rnPrefixes = [
('CDeqptcapacityL3TotalUsageCap1w', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "v4TotalEpCapAvg", "v4TotalEpCapAvg", 36710, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapAvg", prop)
prop = PropMeta("str", "v4TotalEpCapLast", "v4TotalEpCapLast", 36704, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v4 Endpoints capacity current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapLast", prop)
prop = PropMeta("str", "v4TotalEpCapMax", "v4TotalEpCapMax", 36709, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapMax", prop)
prop = PropMeta("str", "v4TotalEpCapMin", "v4TotalEpCapMin", 36708, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapMin", prop)
prop = PropMeta("str", "v4TotalEpCapSpct", "v4TotalEpCapSpct", 36711, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapSpct", prop)
prop = PropMeta("str", "v4TotalEpCapThr", "v4TotalEpCapThr", 36712, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4TotalEpCapThr", prop)
prop = PropMeta("str", "v4TotalEpCapTr", "v4TotalEpCapTr", 36714, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapTr", prop)
prop = PropMeta("str", "v4TotalEpCapTrBase", "v4TotalEpCapTrBase", 36713, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v4 Endpoints capacity trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapTrBase", prop)
prop = PropMeta("str", "v4TotalEpCapTtl", "v4TotalEpCapTtl", 45299, PropCategory.IMPLICIT_TOTAL)
prop.label = "Total v4 Endpoints capacity total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapTtl", prop)
prop = PropMeta("str", "v6TotalEpCapAvg", "v6TotalEpCapAvg", 36731, PropCategory.IMPLICIT_AVG)
prop.label = "Total v6 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapAvg", prop)
prop = PropMeta("str", "v6TotalEpCapLast", "v6TotalEpCapLast", 36725, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v6 Endpoints capacity current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapLast", prop)
prop = PropMeta("str", "v6TotalEpCapMax", "v6TotalEpCapMax", 36730, PropCategory.IMPLICIT_MAX)
prop.label = "Total v6 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapMax", prop)
prop = PropMeta("str", "v6TotalEpCapMin", "v6TotalEpCapMin", 36729, PropCategory.IMPLICIT_MIN)
prop.label = "Total v6 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapMin", prop)
prop = PropMeta("str", "v6TotalEpCapSpct", "v6TotalEpCapSpct", 36732, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v6 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapSpct", prop)
prop = PropMeta("str", "v6TotalEpCapThr", "v6TotalEpCapThr", 36733, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v6 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6TotalEpCapThr", prop)
prop = PropMeta("str", "v6TotalEpCapTr", "v6TotalEpCapTr", 36735, PropCategory.IMPLICIT_TREND)
prop.label = "Total v6 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapTr", prop)
prop = PropMeta("str", "v6TotalEpCapTrBase", "v6TotalEpCapTrBase", 36734, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v6 Endpoints capacity trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapTrBase", prop)
prop = PropMeta("str", "v6TotalEpCapTtl", "v6TotalEpCapTtl", 45300, PropCategory.IMPLICIT_TOTAL)
prop.label = "Total v6 Endpoints capacity total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapTtl", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
4b9394fcf22a80069c7b0fa99773765f13fc7a07
|
7d2f933ed3c54e128ecaec3a771817c4260a8458
|
/venv/Lib/site-packages/mpl_toolkits/mplot3d/art3d.py
|
2d28a808af66a5141266c6950f5966ee40a2a899
|
[] |
no_license
|
danielmoreira12/BAProject
|
c61dfb1d0521eb5a28eef9531a00e744bfb0e26a
|
859f588305d826a35cc8f7d64c432f54a0a2e031
|
refs/heads/master
| 2021-01-02T07:17:39.267278
| 2020-02-25T22:27:43
| 2020-02-25T22:27:43
| 239,541,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,020
|
py
|
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
# Minor additions by Ben Axelrod <baxelrod@coroware.com>
"""
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
"""
import math
import numpy as np
from matplotlib import (
artist, cbook, colors as mcolors, lines, text as mtext, path as mpath)
from matplotlib.collections import (
LineCollection, PolyCollection, PatchCollection, PathCollection)
from matplotlib.colors import Normalize
from matplotlib.patches import Patch
from . import proj3d
def _norm_angle(a):
"""Return the given angle normalized to -180 < *a* <= 180 degrees."""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
@cbook.deprecated("3.1")
def norm_angle(a):
"""Return the given angle normalized to -180 < *a* <= 180 degrees."""
return _norm_angle(a)
def _norm_text_angle(a):
"""Return the given angle normalized to -90 < *a* <= 90 degrees."""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
@cbook.deprecated("3.1")
def norm_text_angle(a):
"""Return the given angle normalized to -90 < *a* <= 90 degrees."""
return _norm_text_angle(a)
def get_dir_vector(zdir):
"""
Return a direction vector.
Parameters
----------
zdir : {'x', 'y', 'z', None, 3-tuple}
The direction. Possible values are:
- 'x': equivalent to (1, 0, 0)
- 'y': equivalent to (0, 1, 0)
- 'z': equivalent to (0, 0, 1)
- *None*: equivalent to (0, 0, 0)
- an iterable (x, y, z) is returned unchanged.
Returns
-------
x, y, z : array-like
The direction vector. This is either a numpy.array or *zdir* itself if
*zdir* is already a length-3 iterable.
"""
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif np.iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
"""
Text object with 3D position and direction.
Parameters
----------
x, y, z
The position of the text.
text : str
The text string to display.
zdir : {'x', 'y', 'z', None, 3-tuple}
The direction of the text. See `.get_dir_vector` for a description of
the values.
Other Parameters
----------------
**kwargs
All other parameters are passed on to `~matplotlib.text.Text`.
"""
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
self.stale = True
@artist.allow_rasterization
def draw(self, renderer):
proj = proj3d.proj_trans_points(
[self._position3d, self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(_norm_text_angle(angle))
mtext.Text.draw(self, renderer)
self.stale = False
def get_tightbbox(self, renderer):
# Overwriting the 2d Text behavior which is not valid for 3d.
# For now, just return None to exclude from layout calculation.
return None
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
"""
3D line object.
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
"""
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
"""
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = np.full_like(xs, fill_value=float(zs))
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
def set_data_3d(self, *args):
"""
Set the x, y and z data
Parameters
----------
x : array_like
The x-data to be plotted
y : array_like
The y-data to be plotted
z : array_like
The z-data to be plotted
Notes
-----
Accepts x, y, z arguments or a single array_like (x, y, z)
"""
if len(args) == 1:
self._verts3d = args[0]
else:
self._verts3d = args
self.stale = True
def get_data_3d(self):
"""
Get the current data
Returns
-------
verts3d : length-3 tuple or array_likes
The current data as a tuple or array_likes
"""
return self._verts3d
@artist.allow_rasterization
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
"""Convert a 2D line to 3D."""
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
def _path_to_3d_segment(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment."""
zs = np.broadcast_to(zs, len(path))
pathsegs = path.iter_segments(simplify=False, curves=False)
seg = [(x, y, z) for (((x, y), code), z) in zip(pathsegs, zs)]
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
@cbook.deprecated("3.1")
def path_to_3d_segment(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment."""
return _path_to_3d_segment(path, zs=zs, zdir=zdir)
def _paths_to_3d_segments(paths, zs=0, zdir='z'):
"""Convert paths from a collection object to 3D segments."""
zs = np.broadcast_to(zs, len(paths))
segs = [_path_to_3d_segment(path, pathz, zdir)
for path, pathz in zip(paths, zs)]
return segs
@cbook.deprecated("3.1")
def paths_to_3d_segments(paths, zs=0, zdir='z'):
"""Convert paths from a collection object to 3D segments."""
return _paths_to_3d_segments(paths, zs=zs, zdir=zdir)
def _path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment with path codes."""
zs = np.broadcast_to(zs, len(path))
pathsegs = path.iter_segments(simplify=False, curves=False)
seg_codes = [((x, y, z), code) for ((x, y), code), z in zip(pathsegs, zs)]
if seg_codes:
seg, codes = zip(*seg_codes)
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
else:
seg3d = []
codes = []
return seg3d, list(codes)
@cbook.deprecated("3.1")
def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment with path codes."""
return _path_to_3d_segment_with_codes(path, zs=zs, zdir=zdir)
def _paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
"""
Convert paths from a collection object to 3D segments with path codes.
"""
zs = np.broadcast_to(zs, len(paths))
segments_codes = [_path_to_3d_segment_with_codes(path, pathz, zdir)
for path, pathz in zip(paths, zs)]
if segments_codes:
segments, codes = zip(*segments_codes)
else:
segments, codes = [], []
return list(segments), list(codes)
@cbook.deprecated("3.1")
def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
"""
Convert paths from a collection object to 3D segments with path codes.
"""
return _paths_to_3d_segments_with_codes(paths, zs=zs, zdir=zdir)
class Line3DCollection(LineCollection):
"""
A collection of 3D lines.
"""
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
"""
Set 3D segments.
"""
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
"""
Project the points according to renderer matrix.
"""
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [np.column_stack([xs, ys]) for xs, ys, zs in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for xs, ys, zs in xyslist:
minz = min(minz, min(zs))
return minz
@artist.allow_rasterization
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
"""
3D patch object.
"""
def __init__(self, *args, zs=(), zdir='z', **kwargs):
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
zs = np.broadcast_to(zs, len(verts))
self._segment3d = [juggle_axes(x, y, z, zdir)
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = zip(*s)
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(np.column_stack([vxs, vys]))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
class PathPatch3D(Patch3D):
"""
3D PathPatch object.
"""
def __init__(self, path, *, zs=(), zdir='z', **kwargs):
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = zip(*s)
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(np.column_stack([vxs, vys]), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def _get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
@cbook.deprecated("3.1")
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
return _get_patch_verts(patch)
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = _get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
"""
A collection of 3D patches.
"""
def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
self._depthshade = depthshade
super().__init__(*args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = offsets.T
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (_zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (_zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, np.column_stack([vxs, vys]))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
class Path3DCollection(PathCollection):
"""
A collection of 3D paths.
"""
def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
self._depthshade = depthshade
super().__init__(*args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = offsets.T
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (_zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (_zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, np.column_stack([vxs, vys]))
return np.min(vzs) if vzs.size else np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Parameters
----------
    zs
The location or locations to place the patches in the collection along
the *zdir* axis. Default: 0.
zdir
The axis in which to place the patches. Default: "z".
depthshade
Whether to shade the patches to give a sense of depth. Default: *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
"""
A collection of 3D polygons.
"""
def __init__(self, verts, *args, zsort='average', **kwargs):
"""
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
"""
super().__init__(verts, *args, **kwargs)
self.set_zsort(zsort)
self._codes3d = None
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
"""
Sets the calculation method for the z-order.
Parameters
----------
zsort : {'average', 'min', 'max'}
The function applied on the z-coordinates of the vertices in the
viewer's coordinate system, to determine the z-order. *True* is
deprecated and equivalent to 'average'.
"""
if zsort is True:
cbook.warn_deprecated(
"3.1", message="Passing True to mean 'average' for set_zsort "
"is deprecated and support will be removed in Matplotlib 3.3; "
"pass 'average' instead.")
zsort = 'average'
self._zsortfunc = self._zsort_functions[zsort]
self._sort_zpos = None
self.stale = True
def get_vector(self, segments3d):
"""Optimize points for projection."""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si + len(p)
segis.append((si, ei))
si = ei
if len(segments3d):
xs, ys, zs = zip(*points)
else:
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
"""Set 3D vertices."""
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], False)
self._closed = closed
def set_verts_and_codes(self, verts, codes):
"""Sets 3D vertices with path codes."""
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort('average')
self._facecolors3d = PolyCollection.get_facecolor(self)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def do_3d_projection(self, renderer):
"""
Perform the 3D projection for this object.
"""
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d._proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
else:
cedge = cedge.repeat(len(xyzlist), axis=0)
# sort by depth (furthest drawn first)
z_segments_2d = sorted(
((self._zsortfunc(zs), np.column_stack([xs, ys]), fc, ec, idx)
for idx, ((xs, ys, zs), fc, ec)
in enumerate(zip(xyzlist, cface, cedge))),
key=lambda x: x[0], reverse=True)
segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
if self._codes3d is not None:
codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
PolyCollection.set_verts_and_codes(self, segments_2d, codes)
else:
PolyCollection.set_verts(self, segments_2d, self._closed)
self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d._proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0:
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else:
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
def set_alpha(self, alpha):
"""
Set the alpha transparencies of the collection.
Parameters
----------
alpha : float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors3d = mcolors.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolor(self):
return self._facecolors2d
def get_edgecolor(self):
return self._edgecolors2d
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d, codes = _paths_to_3d_segments_with_codes(
col.get_paths(), zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
def _get_colors(c, num):
"""Stretch the color argument to provide the required number *num*."""
return np.broadcast_to(
mcolors.to_rgba_array(c) if len(c) else [0, 0, 0, 0],
(num, 4))
@cbook.deprecated("3.1")
def get_colors(c, num):
"""Stretch the color argument to provide the required number *num*."""
return _get_colors(c, num)
def _zalpha(colors, zs):
"""Modify the alphas of the color list according to depth."""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
if len(zs) == 0:
return np.zeros((0, 4))
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
rgba = np.broadcast_to(mcolors.to_rgba_array(colors), (len(zs), 4))
return np.column_stack([rgba[:, :3], rgba[:, 3] * sats])
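# Illustration (note added for clarity): Normalize maps the smallest z to 0
# and the largest to 1, so `sats` runs from 1.0 down to 0.3 -- points at the
# minimum z keep their full alpha while points at the maximum z fade to 30%
# of it, which is the effect enabled by depthshade=True in the collections
# above.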
@cbook.deprecated("3.1")
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth."""
return _zalpha(colors, zs)
|
[
"danielmoreira12@github.com"
] |
danielmoreira12@github.com
|
9f51db102043827d93a8268e6c2d8b224866b62b
|
486bfe036b871551bb40f4a035d4137231198513
|
/src/7.demo_picture.py
|
aeee9d83dfa48b96f34bcd560708e77dc4352eaf
|
[] |
no_license
|
sinnergarden/Just_An_Assignment
|
a9bdd5d19af899e342749a9c5711e21d0e602de5
|
46660896690b41075e6666fa038b4ca380917f87
|
refs/heads/master
| 2021-04-12T08:49:56.581110
| 2018-03-21T09:05:41
| 2018-03-21T09:05:41
| 126,139,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,658
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 18:51:13 2018
@author: liuming
"""
from darkflow.net.build import TFNet
import cv2
import numpy as np
import glob
options = {"model": "cfg/yolo-fashion.cfg",
"load": -1, #"ckpt/yolo-fashion-1562.meta",
"threshold": 0.03, 'gpu':1.0}
tfnet = TFNet(options)
mark = 100
x = 1
stop = 0
image_folder = 'D:/shopee/data/test/'
next_img = 0
for jpg_file in glob.glob(image_folder + '*.jpg'):
correct_path = jpg_file.split('\\')[0] + '/' + jpg_file.split('\\')[1]
next_img = 0
while(next_img == 0):
frame = cv2.imread(correct_path)
RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if x == ord('d') and mark <= 440:
mark += 10
if x == ord('a') and mark >= 60:
mark -= 10
if x == ord(' '):
next_img = 1
if x == 27:
stop = 1
break
threshold = 0.03 + 0.00225 * (mark-50)
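        # Note (added): the slider position maps linearly onto the confidence
        # threshold -- mark 50 (left end of the drawn line) gives 0.03 and
        # mark 450 (right end) gives roughly 0.93.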
result = tfnet.return_predict(RGB)
for detection in result:
label = detection['label']
confidence = format(float(detection['confidence']), '.2f')
tl = (int(detection['topleft']['x']),
int(detection['topleft']['y']))
br = (int(detection['bottomright']['x']),
int(detection['bottomright']['y']))
if float(confidence) >= threshold:
cv2.rectangle(frame,tl,br,(0,255,0),3)
cv2.putText(frame, label + ' ' + confidence, tl,
cv2.FONT_HERSHEY_COMPLEX_SMALL,
1,(255,255,255),1,cv2.LINE_AA)
cv2.imshow('frame', frame)
background = np.ones((500,500,3), dtype = np.uint8) * 255
cv2.putText(background, '0.03', (50,250), cv2.FONT_HERSHEY_COMPLEX_SMALL,
1,(0,0,0),1,cv2.LINE_AA)
cv2.putText(background, '1.0', (450,250), cv2.FONT_HERSHEY_COMPLEX_SMALL,
1,(0,0,0),1,cv2.LINE_AA)
cv2.line(background, (50, 250), (450, 250), (0,0,0), 3)
cv2.circle(background,(mark, 250), 8, (255,0,0), 3)
cv2.putText(background, 'current value: '+ format(threshold, '.2f'), (150,100),
cv2.FONT_HERSHEY_COMPLEX_SMALL,
1,(0,0,0),1,cv2.LINE_AA)
cv2.imshow('threshold_setting', background)
x = cv2.waitKey(10)
if stop == 1:
break
# When everything done, release the capture
cv2.destroyAllWindows()
|
[
"mingliu@xrvision.com"
] |
mingliu@xrvision.com
|
83ea250f5a82814fa9f77f670e7ff821055f4ca8
|
baa9a0a546c1a77ddc0077b34a433e49649ad918
|
/venv/bin/pip
|
1b43856e583093aa25ac2fd8dd1fe4cb721f6889
|
[] |
no_license
|
shubhampachori12110095/VehicleRouting
|
9fb2ae535808fcd168216930d9bc5fb91ee21fa6
|
c2b5c3490ff6b7b163ef2f4c50933c583c2f80c6
|
refs/heads/master
| 2022-12-21T23:20:17.019637
| 2020-10-06T15:19:34
| 2020-10-06T15:19:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
#!/Users/alohnerpiazza/Desktop/EnviaFlores/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"alohnerpiazza@gmail.com"
] |
alohnerpiazza@gmail.com
|
|
3fc799fe13345e1eae8b48fa05b126090829b332
|
5a96112e11834d400a59b76caee33fd63831e273
|
/python3_API_framework_V2/TestCases/test_api_v2.py
|
60c0bfcfe591a2359e2ea5d8e3fd20024415a63f
|
[] |
no_license
|
zhaozongzhao/interface_test
|
d3f93c8220cb5fab5f063ce7e315e54b2f623ce6
|
f63f7e188639b34a8b80c9ce57591d9cabe3f4f8
|
refs/heads/master
| 2020-05-02T03:21:51.633352
| 2019-04-10T15:32:12
| 2019-04-10T15:32:12
| 177,726,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,456
|
py
|
import unittest
from Common.DoExcel import DoExcel
import os
from Common import myRequest
import ddt
from Common import dir_config
from Common import myLogger2
import logging
import re
# Instantiate the logger object
#logger = MyLogger()
# Load all the test data
excelfile = dir_config.testcase_dir + "/api_info_1.xlsx"
de = DoExcel(excelfile)
all_case_datas = de.get_caseDatas_all()
print("All test data", all_case_datas)
global_vars = {}
@ddt.ddt
class Test_Api(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        de.update_init_data()
        de.save_excelFile(excelfile)
    @ddt.data(*all_case_datas)
    def test_api(self, case_data):
        global global_vars
        # Read each row of test data, send the HTTP request and collect the response
        logging.info("============== Starting an API test case; request data follows ===============")
        logging.info("Request URL: %s" % case_data["url"])
        logging.info("Request method: {0}".format(case_data["method"]))
        logging.info("Request data: {0}".format(case_data["request_data"]))
        # Dynamic substitution - check whether the request data references
        # global variables and whether those variables exist.
        if len(global_vars) > 0 and case_data["request_data"] is not None:
            for key, value in global_vars.items():
                if case_data["request_data"].find(key) != -1:
                    case_data["request_data"] = case_data["request_data"].replace(key, value)
            logging.info("Request data after dynamic substitution:\n{0}".format(case_data["request_data"]))
        res = myRequest.myRequest(case_data["url"], case_data["method"], case_data["request_data"])
        logging.info("Response status code: %d" % res.status_code)
        logging.info("Response data:")
        logging.info(res.text)
        # First check whether the test data contains a correlation field. If so,
        # extract it with the given expression and assign it to the named variable.
        if "related_exp" in case_data.keys():
            logging.info("Extracting data from the response:")
            #related_data = parse_response.get_relatedData_from_response(res.text,case_data["related_exp"])
            temp = case_data["related_exp"].split("=")
            res_id = re.findall(temp[1], res.text)
            # Captured dynamically and stored as a global variable.
            global_vars[temp[0]] = res_id[0]
        logging.info("Expected data:")
        logging.info(case_data["expected_data"])
        logging.info("Comparison mode between expected and actual results:")
        if int(case_data["compare_type"]) == 0:
            logging.info("Full-value match mode.")
            try:
                self.assertEqual(res.text, case_data["expected_data"])
                logging.info("Comparison succeeded; test case passed")
            except AssertionError:
                logging.exception("Comparison failed:")
                raise AssertionError
        else:
            logging.info("Regular-expression match mode.")
            re_obj = re.match(case_data["expected_data"], res.text)
            self.assertIsNotNone(re_obj, "Regular expression match failed!")
        logging.info("======================== Finished an API test case ==========================")
|
[
"2206321864@qq.com"
] |
2206321864@qq.com
|
aa12413bce14f2f5aa38cac2b5f3eb2d9bc9aa3c
|
7632459a329d2899dfb37d871e5e0d9570b2ea6d
|
/AI Homework.py
|
fce99319a90634f65c80e1cc52d329f98869196f
|
[] |
no_license
|
chun1102/data-type
|
b7ad8f1bec76ebcd6fb1e7e9218ce0962ef2cede
|
7c8d9739f3538aa5de3ec87edf32158b11fdc4bc
|
refs/heads/master
| 2020-04-24T13:53:58.766193
| 2019-04-24T02:51:10
| 2019-04-24T02:51:10
| 172,003,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
# month = input('Enter the month of the leave request: ')
# day = input('Enter the day of the leave request: ')
# int_month = int(month)
# int_day = int(day)
# monthday = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# sum_month = sum(monthday[0:int_month-1])
# sum_day = sum_month + int_day
# week = (sum_day) % 7 + 1
# print('Weekday', week)
# ------------------------------------------------------------------------
# length1 = input('Enter the first side of the triangle: ')
# length2 = input('Enter the second side of the triangle: ')
# length3 = input('Enter the third side of the triangle: ')
# int_length1 = int(length1)
# int_length2 = int(length2)
# int_length3 = int(length3)
#
# a = int_length1
# b = int_length2
# c = int_length3
#
# if a + b > c and a - b < c:
#     print('1')
#     if a == b and b == c:
#         print('equilateral triangle')
#     elif a == b or b == c or a == c:
#         print('isosceles triangle')
#     else:
#         print('ordinary triangle')
# else:
#     print('not a triangle')
# ---------------------------------------------------------
# MatrixA = [0, 2, 4, 6, 8]
# MatrixB = [1, 3, 5, 7, 9]
# for i in range(0, 5):
#     for j in range(0, 5):
#         new_Matrix = MatrixA[i] * MatrixB[j]
#         print(MatrixA[i], '*', MatrixB[j], '=', new_Matrix)
#
# --------------------------------------------------------------
# The final loop was left unfinished (`if ()`); completed below as a simple
# prime check from 2 to 199, an assumption about the intended exercise.
for i in range(2, 200):
    is_prime = True
    for j in range(2, int(i ** 0.5) + 1):
        if i % j == 0:
            is_prime = False
            break
    if is_prime:
        print(i)
|
[
"chun1123581321@hotmail.com"
] |
chun1123581321@hotmail.com
|
4952991a1d640a087562cf54e0c0e68b505e879f
|
8f0d1699307a057fb73579863c36eec428fed8f4
|
/MapColoringProblem.py
|
a73bebbaa72099c79c80d6fc60628ee101541db3
|
[] |
no_license
|
EitanVilker/AI4
|
b9718f0a5e539d9a5e54ad329a423c6433f4c691
|
9e1e57beb466939d70b84e09dd607d0cb5cba0f7
|
refs/heads/main
| 2023-08-25T19:15:25.254543
| 2021-10-24T04:25:18
| 2021-10-24T04:25:18
| 416,148,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
from ConstraintSatisfactionProblem import ConstraintSatisfactionProblem
class MapColoringProblem(ConstraintSatisfactionProblem):
def __init__(self, variables, domains, constraints):
self.variables = variables
self.domains = domains
self.constraints = constraints
self.variable_index_dict = {}
for i in range(len(self.variables)):
self.variable_index_dict[self.variables[i]] = i
self.assignments_tried = 0
self.assignment = None
def check_constraints(self, x, y, assignment):
if y not in self.constraints[x]:
return True
if x not in assignment or y not in assignment:
return True
if assignment[x] == assignment[y]:
return False
return True
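    # Illustrative note (added): two regions conflict only when both are
    # assigned and share a color -- e.g. with "Mexico" listed in
    # constraints["Texas"], check_constraints("Texas", "Mexico",
    # {"Texas": "r", "Mexico": "r"}) returns False, while the partial
    # assignment {"Texas": "r"} returns True.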
variables = ["Mexico", "Texas", "New England", "California", "Canada", "Greenland", "Iceland", "Russia", "Alaska"]
domains = []
for i in range(len(variables)):
domains.append(["r", "g", "b"])
''' Solution:
Texas = r
Mexico = Canada = g
California = New England = b
'''
constraints = {}
constraints["Mexico"] = ["Texas", "California"]
constraints["California"] = ["Mexico", "Texas", "Canada"]
constraints["Texas"] = ["Mexico", "California", "Canada", "New England"]
constraints["New England"] = ["Texas", "Canada"]
constraints["Canada"] = ["California", "Texas", "New England", "Alaska", "Greenland"]
constraints["Russia"] = ["Alaska"]
constraints["Greenland"] = ["Canada", "Iceland"]
constraints["Iceland"] = ["Greenland"]
constraints["Alaska"] = ["Canada", "Russia"]
mcp = MapColoringProblem(variables, domains, constraints)
mcp.backtracking_search(heuristic="degree", ac3=True)
print(mcp.assignment)
print("Assignments tried: " + str(mcp.assignments_tried))
|
[
"eitan.e.vilker.21@dartmouth.edu"
] |
eitan.e.vilker.21@dartmouth.edu
|
870ca82694c972d8e5693ffb3e26f5f15fed31dc
|
5986fa4765eeb352bf7cfa2c9782a22d0e6d7f86
|
/Servlet/Database/__init__.py
|
dbdc8312de517f6f615303f88de3dfda25457ea2
|
[] |
no_license
|
themerius/nox-obscura.eu
|
b81649201a7fa050202b547ffe155bf1346afc81
|
821da08e6d4bf6083921b9dce00d897ae975a364
|
refs/heads/master
| 2020-05-20T09:20:33.691837
| 2015-02-12T16:58:36
| 2015-02-12T16:58:36
| 30,710,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,291
|
py
|
# -*- coding: utf-8 -*-
# Nox Obscura Guildpage
# ---------------------
import couchdb # minimal Version 0.8.
from Servlet import site_cfg as cfg
viewDict = {
# User Views:
'users-1': 'dev/getCharList', # username, charList
'users-2': 'dev/points', # points, {'username':, 'charList':}
'users-3': 'dev/email', # email, emailFlags
# Raids Views:
'raids-1': 'dev/raidEvent', # [2011,8,24]
'raids-2': 'dev/getDateAndRaidId', # [date], raidId
'raids-3': 'dev/logonRaidIdUsername', # [raidId, username], doc
'raids-4': 'dev/getRaidIdAndUserstate', # [raidId,logonState], {'class':,'role':,'charName':, 'comment':, 'raidId':, 'username':}
# Post Views:
'posts-1': 'dev/getPosts', # [category, dateAndTime], doc
'posts-2': 'dev/getComments' # [postId, dateAndTime], doc
}
class AbstractData(object):
"""
Connects with the CouchDB-Server. Holds shared Functions
for DB-Communication.
Uses excessively the pre defined CouchDB-Views, for fast DB-Access.
"""
# Static Members (shared by all Classinstances)
couch = couchdb.Server(cfg.cfg_dbUrl)
#couch.resource.credentials = (cfg.cfg_dbUser, cfg.cfg_dbPassword)#Login
# Exception Classes
class NoSuchView(Exception): pass
class NoSuchDB(Exception): pass
class NoDbSet(Exception): pass
def __init__(self):
# Connect to a DB
self.users = AbstractData.couch[cfg.cfg_dbStorageLocation_Users]
self.raids = AbstractData.couch[cfg.cfg_dbStorageLocation_Raids]
self.posts = AbstractData.couch[cfg.cfg_dbStorageLocation_Posts]
# The Subclasses can set this
self.myDefaultDb = None
def readView(self, viewKey, **args):
"""Holds a List of Views,
you can access all Views from this Function.
returns:
all rows of the selected view.
parameters:
view: the view name from viewList.
args: for the view"""
return self.readViewWithOtherDb(viewKey, self.myDefaultDb, **args)
def readViewWithOtherDb(self, viewKey, myDb, **args):
"""Holds a List of Views,
you can access all Views from this Function.
returns:
all rows of the selected view.
parameters:
view: the view name from viewList.
args: for the view"""
view = viewDict[viewKey]
if view:
db = myDb
result = db.view(view, **args)
else:
            raise AbstractData.NoSuchView()  # fixed: bare `Data` was undefined
try:
if result.rows.__len__() > 0:
return result.rows
else:
return False # 0 Entries matching
except:
return False
    def dbExistsForView(self, viewKey):
        """Helper for readView"""
        if viewKey.find('users') != -1:
            return self.users
        elif viewKey.find('raids') != -1:
            return self.raids
        elif viewKey.find('posts') != -1:  # fixed: was `finds`, a typo
            return self.posts
        else:
            raise AbstractData.NoSuchDB()  # fixed: bare `Data` was undefined
def createNewEntry(self, data, _id = None):
if _id is None:
from uuid import uuid4
_id = uuid4().hex
if not self.myDefaultDb:
            raise AbstractData.NoDbSet()  # fixed: bare `Data` was undefined
try:
if self.myDefaultDb[_id].id:
return False # The ID already exists!
except:
pass # expected behavior.
try:
self.myDefaultDb[_id] = data
return True
except:
return False
def readEntry(self, _id):
try:
data = self.myDefaultDb[_id]
return data
except:
return False
def changeOneEntry(self, _id, entity, newData):
try:
doc = self.myDefaultDb[_id]
doc[entity] = newData # New Value for entity
self.myDefaultDb[_id] = doc # Safe into Db
return True
except:
return False
def changeEntireEntry(self, _id, newData):
"""_id = string, newData = {}
changes the data or inserts new data"""
try:
doc = self.myDefaultDb[_id]
for data in newData:
doc[data] = newData[data]
self.myDefaultDb[_id] = doc
return True
except:
return False
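# Minimal usage sketch (hedged; the `Users` subclass name and the view key
# are illustrative, not part of this module):
#
#   class Users(AbstractData):
#       def __init__(self):
#           AbstractData.__init__(self)
#           self.myDefaultDb = self.users
#
#   rows = Users().readView('users-1', key='some_username')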
|
[
"sven.hodapp@gmail.com"
] |
sven.hodapp@gmail.com
|
c0a156b2bc9c5366ce1dcb8d8ac7f4eb4cf6ca0f
|
d28a9d0b1df11f87842fdece6f26ff8bb76dafc5
|
/stock_analyse_system/python/jq/get_international_indice.py
|
d636e7eef67863c7e1a354c41c87d4536207e4ee
|
[] |
no_license
|
clouderzheng/stock_analyse_system
|
9949e6080a2cd9546e610d419a9d235ca343bd49
|
61973eaee973153cd230aa977a6c9c420c99e659
|
refs/heads/master
| 2022-11-14T22:52:07.256520
| 2020-07-08T00:42:41
| 2020-07-08T00:42:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
import jqdatasdk as jq
from jqdatasdk import finance
from python.jq import login
import datetime
"""Fetch international market indices."""
class InternationalIndice(object):
    """Log in to JoinQuant on initialisation."""
    def __init__(self):
        login.login()
    # """Fetch the Nasdaq Composite index."""
    # def get_Nasdaq_Composite_Index(self, count=100):
    #     return self.get_International_Index("IXIC", count)
    """Fetch an international index."""
    def get_International_Index(self, code, count=100):
        q = jq.query(finance.GLOBAL_IDX_DAILY).filter(finance.GLOBAL_IDX_DAILY.code == code).order_by(
            finance.GLOBAL_IDX_DAILY.day.desc()).limit(count)
        return finance.run_query(q)
    # """Fetch SSE 50 index data."""
    # def get_SSE_50_Index(self, count=100):
    #     return jq.get_price('000001.XSHG', count=count, end_date=datetime.date.today(), fq='pre',)
    #
    # """Fetch SZSE Component index data."""
    # def get_SZCZ_Index(self, count=100):
    #     return jq.get_price('399001.XSHE', count=count, end_date=datetime.date.today(), fq='pre',)
    """Fetch domestic index data."""
    def get_internal_Index(self, code, count=100):
        return jq.get_price(code, count=count, end_date=datetime.date.today(), fq='pre',)
    """Fetch data for a single specified stock."""
    def get_Stock_Price(self, code, end_date, count=10, start_date=None):
        if start_date is None:
            return jq.get_price(code, count=count, end_date=end_date, fq='pre',)  # fixed: count was hard-coded to 100, ignoring the parameter
        else:
            return jq.get_price(code, start_date=start_date, end_date=end_date, fq='pre',)
# query = InternationalIndice()
# data = query.get_Nasdaq_Composite_Index()
# print(data[['open','close','low','high']])
# print(data[['day','open','close','low','high']].to_json())
# print(numpy.array(data[['open','close','low','high']]))
# print(query.get_SSE_50_Index())
|
[
"zhengjingyun@unionbigdata.com"
] |
zhengjingyun@unionbigdata.com
|
46efd06e7181e3095d182fdcacca6baea3973712
|
8d375652e44b67d73102fee7abc1abaab4cb4329
|
/mcompiler/kernel/makeref.py
|
9dfdeb93aea606402be14eef4fbc0d4790b57a87
|
[
"MIT"
] |
permissive
|
paulscottrobson/old-m-versions
|
6d2061e36f2a5aaef388a4786406f876f0a06e0b
|
c2edb4200d32e066223ace4fd05837a485302645
|
refs/heads/master
| 2020-04-04T03:09:25.399283
| 2018-11-01T12:14:57
| 2018-11-01T12:14:57
| 155,709,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
# ***********************************************************************************************
# ***********************************************************************************************
#
# Name : makeref.py
# Purpose : make reference file.
# Author : Paul Robson (paul@robsons.org.uk)
# Created : 17th September 2018
#
# ***********************************************************************************************
# ***********************************************************************************************
import re
references = {}
#
# Read in the listing file, and extract lines with label values on it.
# (first bit is snasm-only)
#
src = [x.strip().lower() for x in open("boot.img.vice").readlines()]
#
# For each line, see if it fits the <label> = $<address>
#
for l in src:
if l.find(" _definition_") >= 0:
#print(l)
        m = re.match(r"^al\s+c\:([0-9a-f]+)\s+_definition_([_0-9a-fmro]+)$", l)
assert m is not None,l
#
# If so, extract name and address
#
name = m.group(2)
address = int(m.group(1),16)
#
# If it is definition, get name, checking if it is a macro and
# convert back to standard ASCII
#
isMacro = False
if name[-6:] == "_macro":
name = name[:-6]
isMacro = True
name = "".join([chr(int(x,16)) for x in name.split("_")])
name = name.lower()
if isMacro:
name = "&&"+name
references[name.lower()] = address
#
# Write the file out.
#
keys = [x for x in references]
keys.sort(key = lambda x:references[x])
ref = "\n".join(["{0}:=${1:06x}".format(x,references[x]) for x in keys])
h = open("boot.dict","w").write(ref+"\n")
|
[
"paul@robsons.org.uk"
] |
paul@robsons.org.uk
|
2e59aa5d004b542b01704a4d2225100f236ee117
|
d808d22998d4498638414f924af490c2e60acbba
|
/collections/src/Collections-pt1/aula2.1.py
|
dde742f2b12edfa39f8df886adc86faaf382f96d
|
[] |
no_license
|
Mathtzt/Python
|
faeb36ba63e3fc623bc39cb2d3fd3548bcb50ad8
|
04232c99396f9768137e8a0527dda37c8b769ae9
|
refs/heads/master
| 2023-02-09T17:07:58.951086
| 2021-01-03T00:45:35
| 2021-01-03T00:45:35
| 289,573,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,557
|
py
|
## This file was created to study Python collections, in particular lists and tuples.
class ContaCorrente:
    def __init__(self, codigo):
        self.codigo = codigo
        self.saldo = 0
    def deposita(self, valor):
        self.saldo += valor
    def __str__(self):
        return "[>>Code {} balance {}<<]".format(self.codigo, self.saldo)
## Snippet to make testing easier
def init_lista_contas():
    conta_pessoal = ContaCorrente(10)
    conta_pessoal.deposita(500)
    ##print(conta_pessoal)
    conta_da_maria = ContaCorrente(11)
    conta_da_maria.deposita(1000)
    return [conta_pessoal, conta_da_maria]
def deposita_para_todas(contas):
    for conta in contas:
        conta.deposita(100)
def testes_tuplas():
    """
    When specific positions are meant to carry different meanings, i.e. the
    structure of the sequence must stay exactly as it was initialised, that is
    already a hint to use a tuple instead of a list.
    """
    conta_pessoal = ('Matheus', 25, 1995)  ## Initialising a tuple. Note that parentheses are used for this.
    #conta_pessoal.append(123)  ## Would break because a tuple is an immutable representation.
##################################
if __name__ == "__main__":
    ## Testing a list of objects
    contas = init_lista_contas()
    for conta in contas:
        print(conta)
    ## Depositing 100 reais into every account
    deposita_para_todas(contas)
    print(contas[0], contas[1])
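## Expected output sketch (added note): deposita_para_todas adds 100 to each
## account, so conta_pessoal ends with balance 600 and conta_da_maria with 1100.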
|
[
"matheus.cn10@gmail.com"
] |
matheus.cn10@gmail.com
|
2e78637ce106bd6df1734f47548f91b1b8172d72
|
f4fbfe6ba6baaa12ff3fd79507e8397068d5c0b4
|
/blog/urls.py
|
44a9a15151dad2e98c4c5d59633eaee3263856a8
|
[] |
no_license
|
jam2a/my-first-blog
|
69516410701b5815052e8ebf58385d252f79aea9
|
70ce27a8d02f71132900aa7ac40bc30b40c8d8c6
|
refs/heads/master
| 2020-03-28T20:30:02.978761
| 2018-09-17T13:36:13
| 2018-09-17T13:36:13
| 149,076,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.post_list, name='post_list'),
path('post/<int:pk>/', views.post_detail, name='post_detail'),
    path('post/new/', views.post_new, name='post_new'),
path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
]
|
[
"jam2@jmail.plala.or.jp"
] |
jam2@jmail.plala.or.jp
|
0d2ea1c5f31a044d68ce7bb06f65aaa2ee8a1422
|
327981aeef801fec08305d70270deab6f08bc122
|
/13.tkinter与银行系统实战/thinker/18.Combobox下拉控件.py
|
0dc0692ce37fde2328c063bb484b27127a142176
|
[] |
no_license
|
AWangHe/Python-basis
|
2872db82187b169226271c509778c0798b151f50
|
2e3e9eb6da268f765c7ba04f1aefc644d50c0a29
|
refs/heads/master
| 2020-03-20T12:15:44.491323
| 2018-06-15T08:24:19
| 2018-06-15T08:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
# -*- coding: utf-8 -*-
import tkinter
from tkinter import ttk
# Create the main window
win = tkinter.Tk()
# Set the title
win.title("魔兽世界")
# Set size and position: 400x400, offset 400 px from the left and 100 px from the top
win.geometry("400x400+400+100")
# Bind a variable
cv = tkinter.StringVar()
com = ttk.Combobox(win, textvariable=cv)
com.pack()
# Set the dropdown data
com["value"] = ("济南", "青岛", "济宁")
# Set the default value
com.current(0)
# Bind an event
def func(event):
    print(com.get())
    print(cv.get())
com.bind("<<ComboboxSelected>>", func)
win.mainloop()
|
[
"huanji2209747841@foxmail.com"
] |
huanji2209747841@foxmail.com
|
2a976186b04e2414a02608208b9b889bdc4db0de
|
1b8fba01309da37f8d0ff408765c1d545fc588d6
|
/tests/data/test_d2go_datasets.py
|
8f5c1514bfeff7ecb756ed09dba2c86dcd1c1ecd
|
[
"Apache-2.0"
] |
permissive
|
supriyar/d2go
|
9bd54bcb2704c91d7bf0d5fceab2ac4f23d59346
|
9dc1600b05ecf60fab556599b4c0bc6c32837449
|
refs/heads/main
| 2023-08-11T16:19:50.578547
| 2021-10-01T17:43:32
| 2021-10-01T17:44:49
| 413,646,825
| 0
| 0
|
Apache-2.0
| 2021-10-05T02:20:59
| 2021-10-05T02:20:58
| null |
UTF-8
|
Python
| false
| false
| 10,262
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import os
import unittest
import d2go.data.extended_coco as extended_coco
from d2go.data.keypoint_metadata_registry import (
KEYPOINT_METADATA_REGISTRY,
KeypointMetadata,
get_keypoint_metadata,
)
from d2go.data.utils import (
maybe_subsample_n_images,
AdhocDatasetManager,
COCOWithClassesToUse,
)
from d2go.runner import Detectron2GoRunner
from d2go.utils.testing.data_loader_helper import (
LocalImageGenerator,
create_toy_dataset,
)
from d2go.utils.testing.helper import tempdir
from detectron2.data import DatasetCatalog, MetadataCatalog
from mobile_cv.common.misc.file_utils import make_temp_directory
def create_test_images_and_dataset_json(data_dir, num_images=10, num_classes=-1):
# create image and json
image_dir = os.path.join(data_dir, "images")
os.makedirs(image_dir)
json_dataset, meta_data = create_toy_dataset(
LocalImageGenerator(image_dir, width=80, height=60),
num_images=num_images,
num_classes=num_classes,
)
json_file = os.path.join(data_dir, "{}.json".format("inj_ds1"))
with open(json_file, "w") as f:
json.dump(json_dataset, f)
return image_dir, json_file
class TestD2GoDatasets(unittest.TestCase):
def test_coco_conversions(self):
test_data_0 = {
"info": {},
"imgs": {
"img_1": {
"file_name": "0.jpg",
"width": 600,
"height": 600,
"id": "img_1",
}
},
"anns": {0: {"id": 0, "image_id": "img_1", "bbox": [30, 30, 60, 20]}},
"imgToAnns": {"img_1": [0]},
"cats": {},
}
test_data_1 = copy.deepcopy(test_data_0)
test_data_1["imgs"][123] = test_data_1["imgs"].pop("img_1")
test_data_1["imgs"][123]["id"] = 123
test_data_1["anns"][0]["image_id"] = 123
test_data_1["imgToAnns"][123] = test_data_1["imgToAnns"].pop("img_1")
for test_data, exp_output in [(test_data_0, [0, 0]), (test_data_1, [123, 123])]:
with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
src_json = os.path.join(tmp_dir, "source.json")
out_json = os.path.join(tmp_dir, "output.json")
with open(src_json, "w") as h_in:
json.dump(test_data, h_in)
out_json = extended_coco.convert_coco_text_to_coco_detection_json(
src_json, out_json
)
self.assertEqual(out_json["images"][0]["id"], exp_output[0])
self.assertEqual(out_json["annotations"][0]["image_id"], exp_output[1])
def test_annotation_rejection(self):
img_list = [
{"id": 0, "width": 50, "height": 50, "file_name": "a.png"},
{"id": 1, "width": 50, "height": 50, "file_name": "b.png"},
]
ann_list = [
[
{
"id": 0,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [0, 0, 10, 10],
},
{
"id": 1,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [45, 45, 10, 10],
},
{
"id": 2,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [-5, -5, 10, 10],
},
{
"id": 3,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 0,
"bbox": [5, 5, 0, 0],
},
{
"id": 4,
"image_id": 0,
"category_id": 0,
"segmentation": [[]],
"area": 25,
"bbox": [5, 5, 5, 5],
},
],
[
{
"id": 5,
"image_id": 1,
"category_id": 0,
"segmentation": [[]],
"area": 100,
"bbox": [0, 0, 0, 0],
},
],
]
out_dict_list = extended_coco.convert_to_dict_list(
"",
[0],
img_list,
ann_list,
)
self.assertEqual(len(out_dict_list), 1)
@tempdir
def test_coco_injection(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds1", "inj_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, "/mnt/fair"],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, "inj_ds2"],
]
]
)
runner.register(cfg)
inj_ds1 = DatasetCatalog.get("inj_ds1")
self.assertEqual(len(inj_ds1), 10)
for dic in inj_ds1:
self.assertEqual(dic["width"], 80)
self.assertEqual(dic["height"], 60)
@tempdir
def test_sub_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds3"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"DATASETS.TEST",
("inj_ds3",),
"D2GO_DATA.TEST.MAX_IMAGES",
1,
]
]
)
runner.register(cfg)
with maybe_subsample_n_images(cfg) as new_cfg:
test_loader = runner.build_detection_test_loader(
new_cfg, new_cfg.DATASETS.TEST[0]
)
self.assertEqual(len(test_loader), 1)
def test_coco_metadata_registry(self):
@KEYPOINT_METADATA_REGISTRY.register()
def TriangleMetadata():
return KeypointMetadata(
names=("A", "B", "C"),
flip_map=(
("A", "B"),
("B", "C"),
),
connection_rules=[
("A", "B", (102, 204, 255)),
("B", "C", (51, 153, 255)),
],
)
tri_md = get_keypoint_metadata("TriangleMetadata")
self.assertEqual(tri_md["keypoint_names"][0], "A")
self.assertEqual(tri_md["keypoint_flip_map"][0][0], "A")
self.assertEqual(tri_md["keypoint_connection_rules"][0][0], "A")
@tempdir
def test_coco_metadata_register(self, tmp_dir):
@KEYPOINT_METADATA_REGISTRY.register()
def LineMetadata():
return KeypointMetadata(
names=("A", "B"),
flip_map=(("A", "B"),),
connection_rules=[
("A", "B", (102, 204, 255)),
],
)
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA",
["LineMetadata"],
]
]
)
runner.register(cfg)
inj_md = MetadataCatalog.get("inj_ds")
self.assertEqual(inj_md.keypoint_names[0], "A")
self.assertEqual(inj_md.keypoint_flip_map[0][0], "A")
self.assertEqual(inj_md.keypoint_connection_rules[0][0], "A")
@tempdir
def test_coco_create_adhoc_class_to_use_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(
tmp_dir, num_classes=2
)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["test_adhoc_ds", "test_adhoc_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, json_file],
]
]
)
runner.register(cfg)
# Test adhoc classes to use
AdhocDatasetManager.add(COCOWithClassesToUse("test_adhoc_ds", ["class_0"]))
ds_list = DatasetCatalog.get("test_adhoc_ds@1classes")
self.assertEqual(len(ds_list), 5)
# Test adhoc classes to use with suffix removal
AdhocDatasetManager.add(
COCOWithClassesToUse("test_adhoc_ds2@1classes", ["class_0"])
)
ds_list = DatasetCatalog.get("test_adhoc_ds2@1classes")
self.assertEqual(len(ds_list), 5)
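    # Note (added): COCOWithClassesToUse registers an adhoc dataset whose
    # name carries an "@<N>classes" suffix, and an existing suffix is
    # stripped before re-adding -- which is exactly what the two cases
    # above exercise.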
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c6e6e1ef088631e80462884b26b6f3bdfea593fb
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res/scripts/client/messenger/proto/bw_chat2/find_criteria.py
|
ea24304c044114f780246fdbce4279659cbd77a3
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,701
|
py
|
# 2016.02.14 12:42:53 Central Europe (standard time)
# Embedded file name: scripts/client/messenger/proto/bw_chat2/find_criteria.py
from constants import PREBATTLE_TYPE
from messenger.ext import channel_num_gen
from messenger.m_constants import BATTLE_CHANNEL, PROTO_TYPE
from messenger.proto.interfaces import IEntityFindCriteria
class BWBattleChannelFindCriteria(IEntityFindCriteria):
def __init__(self):
super(BWBattleChannelFindCriteria, self).__init__()
self.__ids = []
for item in BATTLE_CHANNEL.ALL:
clientID = channel_num_gen.getClientID4BattleChannel(item.name)
if clientID:
self.__ids.append(clientID)
clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.SQUAD)
if clientID:
self.__ids.append(clientID)
def filter(self, channel):
return channel.getProtoType() is PROTO_TYPE.BW_CHAT2 and channel.getClientID() in self.__ids
class BWPrebattleChannelFindCriteria(IEntityFindCriteria):
def filter(self, channel):
return channel.getProtoType() is PROTO_TYPE.BW_CHAT2 and channel.getPrebattleType()
class BWChatTypeFindCriteria(IEntityFindCriteria):
def __init__(self, chatType):
super(BWChatTypeFindCriteria, self).__init__()
self.__chatType = chatType
def filter(self, channel):
return channel.getProtoType() is PROTO_TYPE.BW_CHAT2 and channel.getProtoData().chatType == self.__chatType
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\proto\bw_chat2\find_criteria.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:42:53 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
c3272a6d17e416f40214a15000daead4cb89569e
|
7b9257e9fd5832323feb1cd4a2c8552edc937cf8
|
/ctw-baseline-master/ssd/caffe/examples/ssd_demo_patch_crop.py
|
2007c2e8ee9ed4fdd0a7417524979772e4f1435e
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"MIT",
"Apache-2.0"
] |
permissive
|
dodgaga/Keywords_CTW
|
4df027f80a3dd1aa72afd1e8b1faba3f6bafdff9
|
8e5c1f344606410c81e23bae1034ddb812fe849e
|
refs/heads/master
| 2020-03-11T07:45:13.850667
| 2018-04-18T02:59:30
| 2018-04-18T02:59:30
| 129,865,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,754
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as Image
import cv2
#matplotlib inline
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Make sure that caffe is on the python path:
caffe_root = '/home/wudao/ctw/ctw-baseline-master/ssd/caffe' # this file is expected to be in {caffe_root}/examples
import os
os.chdir(caffe_root)
import sys
sys.path.insert(0, 'python')
#print os.getcwd()
import caffe
print os.getcwd()
caffe.set_device(5)
caffe.set_mode_gpu()
print os.getcwd()
from google.protobuf import text_format
from caffe.proto import caffe_pb2
# load PASCAL VOC labels
labelmap_file = '/home/wudao/ctw/ctw-baseline-master/ssd/caffe/data/VOC0712/labelmap_voc.prototxt'
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
def get_labelname(labelmap, labels):
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in xrange(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found == True
return labelnames
model_def = 'models/VGGNet/VOC0712/SSD_300x300_ft/deploy.prototxt'
model_weights = 'models/VGGNet/VOC0712/SSD_300x300_ft/VGG_VOC0712_SSD_300x300_ft_iter_120000.caffemodel'
print os.getcwd()
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array([104,117,123])) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
image_resize = 300
net.blobs['data'].reshape(1,3,image_resize,image_resize)
dirname = '/home/wudao/imagesnocrop/'
for maindir, subdir, file_name_list in os.walk(dirname):
for filename in file_name_list:
image_path = os.path.join(maindir, filename)
image = caffe.io.load_image(image_path)
#plt.imshow(image)
print os.getcwd()
transformed_image = transformer.preprocess('data', image)
net.blobs['data'].data[...] = transformed_image
# Forward pass.
detections = net.forward()['detection_out']
# Parse the outputs.
det_label = detections[0,0,:,1]
det_conf = detections[0,0,:,2]
det_xmin = detections[0,0,:,3]
det_ymin = detections[0,0,:,4]
det_xmax = detections[0,0,:,5]
det_ymax = detections[0,0,:,6]
# Get detections with confidence higher than 0.6.
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]
top_conf = det_conf[top_indices]
num_region = top_conf.shape[0]
print("num_region",num_region)
if(num_region == 0):
continue
top_label_indices = det_label[top_indices].tolist()
top_animal_indices = [i for i, label in enumerate (top_label_indices) if (label == 3
or label == 8 or label ==10 or label==12 or label==13 or label==17)]
print("animal_indices",top_animal_indices)
# top_labels = get_labelname(labelmap, top_label_indices)
if (len(top_animal_indices) == 0):
continue
top_xmin = det_xmin[top_indices][top_animal_indices]
top_ymin = det_ymin[top_indices][top_animal_indices]
top_xmax = det_xmax[top_indices][top_animal_indices]
top_ymax = det_ymax[top_indices][top_animal_indices]
xmin = np.array(top_xmin * image.shape[1])
ymin = np.array(top_ymin * image.shape[0])
xmax = np.array(top_xmax * image.shape[1])
ymax = np.array(top_ymax * image.shape[0])
print("detection_box",xmin,ymin,xmax,ymax)
#choose the max region
if(num_region > 1):
width = np.array(xmax - xmin)
height = np.array(ymax - ymin)
size = width*height
max_index = np.where(size == np.max(size))
xmin = xmin[max_index]
ymin = ymin[max_index]
xmax = xmax[max_index]
ymax = ymax[max_index]
#im = cv2.imread(image_path)
im = Image.open(image_path)
print("top_xmin",top_xmin)
#box = (xmin,ymin,xmax,ymax)
width = xmax - xmin
height = ymax - ymin
center_x = [xmin + (xmax - xmin)/2]
center_y = [ymin + (ymax - ymin)/2]
board = max(width, height)
box = (max(0, center_x - board/2),
max(0, center_y - board/2),
min(image.shape[1], center_x + board/2),
min(image.shape[0], center_y + board/2))
print("box",box)
crop_image = im.crop(box)
resize_image = crop_image.resize((256,256))
#crop_image = im[box[1]:box[3], box[0]:box[2]]
#resize_image = cv2.resize(crop_image, (256,256), interpolation=cv2.INTER_CUBIC)
crop_image_path = os.path.join('/home/wudao/imagescrop', filename)
resize_image.save(crop_image_path)
#cv2.imwrite(crop_image_path,resize_image)
#colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
#plt.imshow(image)
|
[
"757480639@qq.com"
] |
757480639@qq.com
|
7382cf4ae940afcc100eb670337e2eca1f2ea8f5
|
a98e008946a5a89bfec7c8e0a2968ddef65e2e80
|
/extensions.py
|
9338da5da06b613b7f9db615c110b9e11bc08aca
|
[] |
no_license
|
Jyue0812/blog-flask
|
a978c19b1cf386258779e78a9e630fd73546c975
|
e3f309c5d4597cd44b73ae540eab6fdfa9f09df3
|
refs/heads/master
| 2020-03-22T09:29:36.212113
| 2018-07-05T11:40:14
| 2018-07-05T11:40:14
| 139,841,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
from flask_moment import Moment
from flask_login import LoginManager
db = SQLAlchemy()
boot = Bootstrap()
migrate = Migrate(db=db)
mail = Mail()
moment = Moment()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def config_extensions(app):
db.init_app(app)
boot.init_app(app)
migrate.init_app(app)
mail.init_app(app)
moment.init_app(app)
login_manager.init_app(app)
login_manager.login_view = 'user.login'
login_manager.login_message = '请登录后再访问'
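# A minimal usage sketch (editor's assumption, not part of this module): these
# extensions are designed for the app-factory pattern, e.g.:
#
#   from flask import Flask
#   from extensions import config_extensions
#
#   def create_app():
#       app = Flask(__name__)
#       app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///dev.db'  # hypothetical config
#       config_extensions(app)
#       return app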
|
[
"yueyuezhu@msn.com"
] |
yueyuezhu@msn.com
|
d13f8bea4ddda9ccb02c2181021c14da9b3fb1d8
|
f3c7ece759a1565256712e69ae6d1959f35a8b3a
|
/blog/models.py
|
eca982ccd765db0aa1ad2cebe47c979c5da5164f
|
[] |
no_license
|
sashoki/starRest
|
b08e1aa64803834ffbfc407f6c4c80531a242544
|
7de726009d182f0e5aa9fa2dfd62247bdcf726bb
|
refs/heads/master
| 2020-05-24T01:23:08.700483
| 2019-05-16T13:32:03
| 2019-05-16T13:32:03
| 187,033,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.base_user import AbstractBaseUser
import uuid
from django.utils.safestring import mark_safe
from django.utils import timezone
# Create your models here.
class UserProfile(models.Model):
class Meta:
verbose_name = 'User'
verbose_name_plural = 'Users'
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='User Profile')
logo = models.ImageField(null=True, blank=True, upload_to="logo/", verbose_name='Logo', help_text="50x50")
full_name = models.CharField(max_length=25, verbose_name='Full name')
uniq_identifi = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False,
verbose_name='Unique identificator') # primary_key = True,
def __str__(self):
return f'{self.full_name or self.uniq_identifi}'
def __unicode__(self):
return f'{self.full_name or self.uniq_identifi}'
def sender_email(obj):
return f'{obj.user.email}'
sender_email.short_description = 'E-mail'
def sender_user(obj):
return f'{obj.user.username}'
sender_user.short_description = 'Name'
def bit_logo(self):
if self.logo:
return mark_safe('<img src="%s" style="width: 30px; height:30px;" />' % self.logo.url)
else:
return 'Logo not selected'
bit_logo.short_description = 'Logo'
bit_logo.allow_tags = True
class Post(models.Model):
class Meta():
ordering = ['-post_date']
db_table = 'post'
verbose_name_plural = 'Post'
verbose_name = 'Posts'
post_title = models.CharField(max_length=200, verbose_name=u'Title')
post_text = models.TextField(null=True, blank=True, verbose_name=u'Text')
post_date = models.DateTimeField(default=timezone.now, verbose_name=u'Create data')
post_author = models.ForeignKey(UserProfile, on_delete=models.CASCADE, verbose_name=u'Post autor')
def __str__(self):
return self.post_title
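# Editor's usage sketch (assumption; mirrors only the fields defined above):
#
#   profile = UserProfile.objects.create(user=some_user, full_name='Jane Doe')
#   Post.objects.create(post_title='Hello', post_text='First post', post_author=profile)
#
# `some_user` is a hypothetical django.contrib.auth User instance.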
|
[
"ideauspeha@gmail.com"
] |
ideauspeha@gmail.com
|
637f8a88898e01790740bce96fe0dfbce3ff45a5
|
e9946dc85edf828d0b40eb09eec1d834f5d07890
|
/print_test.py
|
cedb51604cc597958c89e3b6806dbf1cadf40c06
|
[] |
no_license
|
sillyer/learn-python-the-hard-way
|
07bc1386e316285402b4994395009b5bd5f47363
|
5d35130622e5ad87b1fcb4bbf46f5f31d60f05f2
|
refs/heads/master
| 2021-05-06T02:56:24.192844
| 2018-01-25T09:41:41
| 2018-01-25T09:41:41
| 114,712,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
print """
test
test
test
"""
|
[
"achen586@gmail.com"
] |
achen586@gmail.com
|
4fa8122715a8ceb6252aed6bc2fae2ba520cd07a
|
e8c2881e13557f4cfdefae3a11970cb71d956922
|
/tests/test_predictive_search.py
|
3918d55add7abe62a36d49d558de81481e519ea7
|
[
"BSD-3-Clause"
] |
permissive
|
oguna/pymigemo
|
a89dfbf8536d99769a187a0ebc4a7fec14531788
|
3168601b3f282dab67184d174418ae88e2f512da
|
refs/heads/master
| 2022-10-15T17:03:43.214804
| 2022-09-25T10:28:07
| 2022-09-25T10:28:07
| 255,638,701
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
from unittest import TestCase
import unittest
import migemo.romajiconverter
class TestPredictiveSearch(TestCase):
def test_convert_romaji_to_hiragana(self):
self.assertEqual('あ', migemo.romajiconverter.convert_romaji_to_hiragana('a'))
self.assertEqual('z', migemo.romajiconverter.convert_romaji_to_hiragana('z'))
self.assertEqual('あいうえお', migemo.romajiconverter.convert_romaji_to_hiragana('aiueo'))
self.assertEqual('ん', migemo.romajiconverter.convert_romaji_to_hiragana('n'))
if __name__ == '__main__':
unittest.main()
|
[
"nao.lk.118@gmail.com"
] |
nao.lk.118@gmail.com
|
8edf548db029dd530fa8bddd6f142a6ecd491f48
|
3dfb4ee39555b30e6e0c6fcdbef371864e69f694
|
/google-cloud-sdk/lib/googlecloudsdk/api_lib/dns/transaction_util.py
|
e0444226d947ade648184a8d0f468d647f579eed
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
MD-Anderson-Bioinformatics/NG-CHM_Galaxy
|
41d1566d5e60416e13e023182ca4351304381a51
|
dcf4886d4ec06b13282143ef795c5f0ff20ffee3
|
refs/heads/master
| 2021-06-02T21:04:12.194964
| 2021-04-29T14:45:32
| 2021-04-29T14:45:32
| 130,249,632
| 0
| 1
| null | 2020-07-24T18:35:21
| 2018-04-19T17:25:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,070
|
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for record-set transactions."""
import os
from dns import rdatatype
from googlecloudsdk.api_lib.dns import import_util
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import resource_printer
from googlecloudsdk.third_party.apis.dns.v1 import dns_v1_messages as messages
import yaml
DEFAULT_PATH = 'transaction.yaml'
class CorruptedTransactionFileError(core_exceptions.Error):
def __init__(self):
super(CorruptedTransactionFileError, self).__init__(
'Corrupted transaction file.\n\n'
'Please abort and start a new transaction.')
def WriteToYamlFile(yaml_file, change):
"""Writes the given change in yaml format to the given file.
Args:
yaml_file: file, File into which the change should be written.
change: Change, Change to be written out.
"""
printer = resource_printer.YamlPrinter(yaml_file)
printer.AddRecord(change)
def _RecordSetsFromDictionaries(record_set_dictionaries):
"""Converts list of record-set dictionaries into list of ResourceRecordSets.
Args:
record_set_dictionaries: [{str:str}], list of record-sets as dictionaries.
Returns:
list of ResourceRecordSets equivalent to given list of yaml record-sets
"""
record_sets = []
for record_set_dict in record_set_dictionaries:
record_set = messages.ResourceRecordSet()
# Need to assign kind to default value for useful equals comparisons.
record_set.kind = record_set.kind
record_set.name = record_set_dict['name']
record_set.ttl = record_set_dict['ttl']
record_set.type = record_set_dict['type']
record_set.rrdatas = record_set_dict['rrdatas']
record_sets.append(record_set)
return record_sets
def ChangeFromYamlFile(yaml_file):
"""Returns the change contained in the given yaml file.
Args:
yaml_file: file, A yaml file with change.
Returns:
Change, the change contained in the given yaml file.
Raises:
CorruptedTransactionFileError: if the record_set_dictionaries are invalid
"""
try:
change_dict = yaml.safe_load(yaml_file) or {}
except yaml.error.YAMLError:
raise CorruptedTransactionFileError()
if (change_dict.get('additions') is None or
change_dict.get('deletions') is None):
raise CorruptedTransactionFileError()
change = messages.Change()
change.additions = _RecordSetsFromDictionaries(change_dict['additions'])
change.deletions = _RecordSetsFromDictionaries(change_dict['deletions'])
return change
def CreateRecordSetFromArgs(args):
"""Creates and returns a record-set from the given args.
Args:
args: The arguments to use to create the record-set.
Raises:
ToolException: If given record-set type is not supported
Returns:
ResourceRecordSet, the record-set created from the given args.
"""
rd_type = rdatatype.from_text(args.type)
if rd_type not in import_util.RDATA_TRANSLATIONS:
raise exceptions.ToolException(
'unsupported record-set type [{0}]'.format(args.type))
record_set = messages.ResourceRecordSet()
# Need to assign kind to default value for useful equals comparisons.
record_set.kind = record_set.kind
record_set.name = util.AppendTrailingDot(args.name)
record_set.ttl = args.ttl
record_set.type = args.type
record_set.rrdatas = args.data
if rd_type is rdatatype.TXT or rd_type is rdatatype.SPF:
record_set.rrdatas = [import_util.QuotedText(datum) for datum in args.data]
return record_set
class TransactionFile(object):
"""Context for reading/writing from/to a transaction file."""
def __init__(self, trans_file_path, mode='r'):
if not os.path.isfile(trans_file_path):
raise exceptions.ToolException(
'transaction not found at [{0}]'.format(trans_file_path))
self.__trans_file_path = trans_file_path
try:
self.__trans_file = open(trans_file_path, mode)
except IOError as exp:
msg = 'unable to open transaction [{0}] because [{1}]'
msg = msg.format(trans_file_path, exp)
raise exceptions.ToolException(msg)
def __enter__(self):
return self.__trans_file
def __exit__(self, typ, value, traceback):
self.__trans_file.close()
if typ is IOError or typ is yaml.YAMLError:
msg = 'unable to read/write transaction [{0}] because [{1}]'
msg = msg.format(self.__trans_file_path, value)
raise exceptions.ToolException(msg)
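# Editor's usage sketch (assumption, based only on the classes above): reading a
# pending change back out of the default transaction file.
#
#   with TransactionFile(DEFAULT_PATH) as trans_file:
#       change = ChangeFromYamlFile(trans_file)
#   print(len(change.additions))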
|
[
"rbrown@insilico.us.com"
] |
rbrown@insilico.us.com
|
2ab777a490e53a46f4e5f312be43543a21489dc7
|
0cf29e73d907911d13ffb75d97777052e19facd7
|
/examples/model_compress/pruning/v2/activation_pruning_torch.py
|
4e79bd5102da4e256d9fd92767607b1f8208aad8
|
[
"MIT"
] |
permissive
|
ultmaster/nni
|
e2a3db9ac794ffc0ee37bc6aec44fbee417b2040
|
68ca6f21ee0d163fa17c5e303f470da84c0c4c97
|
refs/heads/master
| 2023-07-06T01:39:28.068554
| 2022-03-07T08:22:50
| 2022-03-07T08:22:50
| 200,209,376
| 0
| 0
|
MIT
| 2022-07-20T02:44:26
| 2019-08-02T09:43:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,198
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
NNI example for supported ActivationAPoZRank and ActivationMeanRank pruning algorithms.
In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.
'''
import argparse
import sys
import torch
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ActivationAPoZRankPruner, ActivationMeanRankPruner
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
from cifar10.vgg import VGG
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
g_epoch = 0
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True),
batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False)
def trainer(model, optimizer, criterion):
global g_epoch
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx and batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
g_epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
g_epoch += 1
def evaluator(model):
model.eval()
correct = 0.0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
acc = 100 * correct / len(test_loader.dataset)
print('Accuracy: {}%\n'.format(acc))
return acc
def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160):
optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay)
scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1)
return optimizer, scheduler
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='PyTorch Example for model compression')
parser.add_argument('--pruner', type=str, default='apoz',
choices=['apoz', 'mean'],
help='pruner to use')
parser.add_argument('--pretrain-epochs', type=int, default=20,
help='number of epochs to pretrain the model')
parser.add_argument('--fine-tune-epochs', type=int, default=20,
help='number of epochs to fine tune the model')
args = parser.parse_args()
print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50)
model = VGG().to(device)
optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs)
criterion = torch.nn.CrossEntropyLoss()
pre_best_acc = 0.0
best_state_dict = None
for i in range(args.pretrain_epochs):
trainer(model, optimizer, criterion)
scheduler.step()
acc = evaluator(model)
if acc > pre_best_acc:
pre_best_acc = acc
best_state_dict = model.state_dict()
print("Best accuracy: {}".format(pre_best_acc))
model.load_state_dict(best_state_dict)
pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
g_epoch = 0
# Start to prune and speedup
print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
config_list = [{
'total_sparsity': 0.5,
'op_types': ['Conv2d'],
}]
# make sure you have used nni.trace to wrap the optimizer class before initialize
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
if 'apoz' in args.pruner:
pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
else:
pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
_, masks = pruner.compress()
pruner.show_pruned_weights()
pruner._unwrap_model()
ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model()
print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)
evaluator(model)
# Optimizer used in the pruner might be patched, so recommend to new an optimizer for fine-tuning stage.
print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50)
optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs)
best_acc = 0.0
g_epoch = 0
for i in range(args.fine_tune_epochs):
trainer(model, optimizer, criterion)
scheduler.step()
best_acc = max(evaluator(model), best_acc)
flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%')
print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%')
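    # Editor's sketch (assumption, not part of the NNI example): the same config_list
    # schema also supports restricting pruning to named layers, e.g.:
    #
    #   config_list = [{
    #       'total_sparsity': 0.7,
    #       'op_types': ['Conv2d'],
    #       'op_names': ['features.24', 'features.27'],  # hypothetical VGG layer names
    #   }]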
|
[
"noreply@github.com"
] |
noreply@github.com
|
7309a10797c007334b7e4bcc6ecd7e2f83890ade
|
01c2a07a1c87ca3883cfe981a8abff7e5251b100
|
/game.py
|
924d1779b2dd5b7ccea225718c14388d0c41a7ab
|
[] |
no_license
|
Dayron124/Pictionary-Python
|
f741c75b7a2444c3f02bc7c700961a543f8f7c6b
|
4a79e963c3be324c4b514223962bd490e214066c
|
refs/heads/main
| 2023-08-20T23:22:39.790066
| 2021-10-27T09:19:58
| 2021-10-27T09:19:58
| 419,214,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
"""
Handles operations related to the game and the connections between player, board, chat and round
"""
from .player import Player
from .board import Board
from .round import Round
class Game(object):
def __init__(self, id, players):
self.id = id
self.players = players
self.words_used = []
        self.round = None  # set properly by start_new_round() below
self.board = None
self.player_draw_ind = 0
        self.start_new_round()
def start_new_round(self):
self.round = Round(self.get_word(), self.players[self.player_draw_ind])
self.player_draw_ind += 1
if self.player_draw_ind >= len(self.players):
self.end_round()
self.end_game()
def player_guess(self, player, guess):
pass
def player_disconnected(self, player):
pass
def skip(self):
pass
def round_ended(self):
pass
def update_board(self):
pass
def end_game(self):
pass
def get_word(self):
#TODO get a list of words
pass
|
[
"darrenkeithwhite123@gmail.com"
] |
darrenkeithwhite123@gmail.com
|
39d2163f110b8219db0deb50a4510ce04ca481a4
|
ff4f3e8b39a136c92faffa2463cdf44d59665169
|
/lib/counterfc.py
|
3ca175a3ce1ed8b2fb951de31b0e966c4e4f343b
|
[] |
no_license
|
nirmalya-broad/PatHCap_PL
|
275d897fc153432d7b8f1e888bd6f242839bb6c1
|
a790c9c9423ea4eaf43b9a71573137e370ae9269
|
refs/heads/master
| 2022-12-20T07:53:44.829036
| 2020-09-30T22:04:49
| 2020-09-30T22:04:49
| 256,802,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,910
|
py
|
import re
from subprocess import call
from miscutils import get_pct
class CounterFC:
def __init__(self, cldict, sampd):
self.cldict = cldict
self.sampd = sampd
def get_reverse_dir(self, strand_dir):
reverse_dir = ''
if strand_dir == 'reverse':
reverse_dir = 'forward'
elif strand_dir == 'forward':
reverse_dir = 'reverse'
else:
raise ValueError('Illegal value of strand_dir: ' + strand_dir)
return reverse_dir
def get_s_val_sense_allseq(self):
cldict = self.cldict
strand_dir = cldict.strand_dir
s_val = ''
if strand_dir == 'reverse':
s_val = '2'
elif strand_dir == 'forward':
s_val = '1'
return s_val
def get_s_val_antisense_allseq(self):
cldict = self.cldict
strand_dir = cldict.strand_dir
s_val = ''
if strand_dir == 'reverse':
s_val = '1'
elif strand_dir == 'forward':
s_val = '2'
return s_val
def get_s_val(self, exp_dir):
s_val = ''
if exp_dir == 's':
s_val = self.get_s_val_sense_allseq()
elif exp_dir == 'as':
s_val = self.get_s_val_antisense_allseq()
return s_val
def exe_featureCounts(self, sample_id, ref_acc, outsorted, outdir, exp_dir):
cldict = self.cldict
ldelim = cldict.ldelim
Data_dir = cldict.Data_dir
s_val = self.get_s_val(exp_dir)
print("Using s_val: " + s_val + " for exp_dir: " + exp_dir)
featureCounts_str = cldict.featureCounts
countfile_str = outdir + ldelim + sample_id + "_" + ref_acc + "_" + exp_dir + ".counts"
patho_gff = Data_dir + ldelim + ref_acc + "_ALL.gff"
fc_command_s = featureCounts_str + " -s " + s_val + " -t feature -g full_tag -Q 0 -a " + patho_gff + " -o " + countfile_str + " " + outsorted
print("Starting featureCounts..")
print("fc_command_s: " + fc_command_s)
call(fc_command_s.split())
return countfile_str
@staticmethod
def combine_s_as(countfile_s_str, countfile_as_str, countfile_str):
inf_s = open(countfile_s_str, "r")
inf_as = open(countfile_as_str, "r")
outf = open(countfile_str, "w")
# ignore first line
inf_s.next()
#outf.write("# Combining the content of sense and antisense reads\n")
for line in inf_s:
outf.write(line)
inf_s.close()
inf_as.next()
inf_as.next()
for line2 in inf_as:
line3 = "AS_" + line2
outf.write(line3)
inf_as.close()
outf.close()
def load_metrics_from_sense_summary(self, sense_summary_str, metrics_str):
""" This function would open this file and would calculate the
total number of reads by summing up the values on the right hand side.
"""
sense_summary = open(sense_summary_str, "r")
metrics = open(metrics_str, "w")
# Ignore the first line
sense_summary.next()
total_count = 0
for line in sense_summary:
parts = line.split()
lcount = int(parts[-1])
total_count += lcount
metrics.write("metrics_type\tcount\n")
metrics.write("total_count\t" + str(total_count) + "\n")
sense_summary.close()
metrics.close()
return total_count
def load_metrics_from_counts(self, counts_str, metrics_str, total_count):
counts_file = open(counts_str, "r")
metrics_file = open(metrics_str, "a")
# Define the variables
cds_count = 0
igr_count = 0
ncrna_count = 0
rrna_count = 0
trna_count = 0
miscrna_count =0
as_cds_count = 0
as_igr_count = 0
as_ncrna_count = 0
as_rrna_count = 0
as_trna_count = 0
as_miscrna_count = 0
counts_file.next()
for line in counts_file:
parts = line.split()
tag = parts[0]
count_str = parts[-1]
lcount = int(count_str)
is_as = None
ltag = None
if tag.startswith("AS_"):
ltag = re.sub("^AS_", "", tag)
is_as = True
else:
ltag = tag
is_as = False
if ltag.startswith("CDS:"):
if is_as:
as_cds_count += lcount
else:
cds_count += lcount
elif ltag.startswith("rRNA:"):
if is_as:
as_rrna_count += lcount
else:
rrna_count += lcount
elif ltag.startswith("tRNA:"):
if is_as:
as_trna_count += lcount
else:
trna_count += lcount
elif ltag.startswith("ncRNA:"):
if is_as:
as_ncrna_count += lcount
else:
ncrna_count += lcount
elif ltag.startswith("IGR:"):
if is_as:
as_igr_count += lcount
else:
igr_count += lcount
elif ltag.startswith("misc_RNA:"):
if is_as:
as_miscrna_count += lcount
else:
miscrna_count += lcount
else:
raise ValueError('Unknown tag: ' + tag)
sense_frag = cds_count + igr_count + rrna_count + \
ncrna_count + trna_count + miscrna_count
antisense_frag = as_cds_count + as_igr_count + as_rrna_count + \
as_ncrna_count + as_trna_count + as_miscrna_count
aligned_frag = sense_frag + antisense_frag
unmapped_count = total_count - aligned_frag
metrics_file.write("aligned_frag\t%d\n" % aligned_frag)
aligned_frag_pct_of_total = get_pct(aligned_frag, total_count)
metrics_file.write("aligned_frag_pct_of_total\t%.4f\n" % aligned_frag_pct_of_total)
metrics_file.write("unmapped_count\t%d\n" % unmapped_count)
unmapped_count_pct_of_total = get_pct(unmapped_count, total_count)
metrics_file.write("unmapped_count_pct_of_total\t%.4f\n" % unmapped_count_pct_of_total)
metrics_file.write("sense_frag\t%d\n" % sense_frag)
sense_frag_pct_of_aligned = get_pct(sense_frag, aligned_frag)
metrics_file.write("sense_frag_pct_of_aligned\t%.4f\n" % sense_frag_pct_of_aligned)
self.print_feature_type("CDS", cds_count, as_cds_count, aligned_frag, metrics_file)
self.print_feature_type("IGR", igr_count, as_igr_count, aligned_frag, metrics_file)
self.print_feature_type("rRNA", rrna_count, as_rrna_count, aligned_frag, metrics_file)
self.print_feature_type("ncRNA", ncrna_count, as_ncrna_count, aligned_frag, metrics_file)
self.print_feature_type("tRNA", trna_count, as_trna_count, aligned_frag, metrics_file)
self.print_feature_type("miscRNA", miscrna_count, as_miscrna_count, aligned_frag, metrics_file)
counts_file.close()
metrics_file.close()
def print_feature_type(self, feature_name, count_s, count_as, aligned_frag, outfile):
count_total = count_s + count_as
feature_pct_of_aligned = get_pct(count_total, aligned_frag)
feature_sense_pct = get_pct(count_s, count_total)
outfile.write("%s_pct_of_aligned\t%.4f\n" % (feature_name, feature_pct_of_aligned))
outfile.write("%s_sense_pct\t%.4f\n" % (feature_name, feature_sense_pct))
    @staticmethod
    def get_pct(numer, denom):  # duplicates miscutils.get_pct; made static so it is valid as a class member
if denom == 0:
return 0.0
else:
lval = (numer * 100.0) / denom
return lval
def get_fc_metrics(self, sample_id, ref_acc, outdir):
        # This function looks at two files, the sense summary file and the
        # combined counts file, and builds a final metrics file similar to the
        # one on the host side. Once that part is done we can combine the
        # metric files to make the unified metric file.
cldict = self.cldict
ldelim = cldict.ldelim
sense_summary_str = outdir + ldelim + sample_id + "_" + ref_acc + \
"_s.counts.summary"
counts_str = outdir + ldelim + sample_id + "_" + ref_acc + ".counts"
metrics_str = outdir + ldelim + sample_id + "_" + ref_acc + ".metrics"
print("Building metrics for " + sample_id)
print("sense_summary_str: " + sense_summary_str)
print("counts_str: " + counts_str)
print("metrics_str: " + metrics_str)
total_count = self.load_metrics_from_sense_summary(sense_summary_str,
metrics_str)
self.load_metrics_from_counts(counts_str, metrics_str, total_count)
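    # Editor's note (sketch, not in the original): featureCounts' -s flag is 0/1/2 for
    # unstranded / stranded / reversely stranded, so the mapping implemented above is:
    #
    #   strand_dir='reverse' -> sense counts use -s 2, antisense counts use -s 1
    #   strand_dir='forward' -> sense counts use -s 1, antisense counts use -s 2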
|
[
"nirmalya@broadinstitute.org"
] |
nirmalya@broadinstitute.org
|
d700bfe0470ed942dca42727b21481b2d69a4bcd
|
5e324af46c554b88b97ee26886b05c88457ff0f5
|
/franchises/models/franchise.py
|
8d73000fdaadc7d85bb373e0c6cadd7335661a11
|
[] |
no_license
|
doubleclickdetroit/dindintonight
|
1bda8851e49782d4dc16ca77d46e4b1f431c2b52
|
9769e1a96730b02511d25af8828b075dff5c35b5
|
refs/heads/master
| 2016-08-04T22:01:08.083566
| 2014-07-26T18:58:58
| 2014-07-26T18:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
from autoslug import AutoSlugField
from django.db import models
from django.db.models.signals import post_save
from core.models import BaseModel
class Franchise(BaseModel):
id = models.AutoField(primary_key=True)
owner = models.OneToOneField('users.User', related_name='franchise_owners')
slug = AutoSlugField(populate_from='name', unique=True, db_index=True)
name = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
app_label = 'franchises'
db_table = 'franchises'
verbose_name = 'Franchise'
verbose_name_plural = 'Franchises'
def __unicode__(self):
return '{0} {1}'.format(self.owner.first_name, self.owner.last_name)
def franchise_post_save_handler(sender, instance, **kwargs):
pass
post_save.connect(franchise_post_save_handler, sender=Franchise)
|
[
"rgarrison3@gmail.com"
] |
rgarrison3@gmail.com
|
f50c03106b11172d042019389b17effbb2734fd1
|
5480f743915225b4e4b158b789f6f34e7226a61c
|
/face.py
|
4c57dbb434c6b55f96595fb14476aca5183e2c95
|
[] |
no_license
|
istiakahmad/AI-based-online-exam
|
6ed4b25f06e743b30639fc4bcd0d4c2fd706a4be
|
c74260028cae48a048a265eb995ab9e9d47d890e
|
refs/heads/master
| 2020-06-10T17:20:46.040772
| 2019-06-25T11:00:40
| 2019-06-25T11:00:40
| 193,690,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
from __future__ import print_function
import face_recognition
import os
import glob
import cv2
import time
def facerecognitionFunction(username):
def scan_known_people(known_people_folder):
known_names = []
known_face_encodings = []
for file in known_people_folder:
basename = os.path.splitext(os.path.basename(file))[0]
if basename == username:
img = face_recognition.load_image_file(file)
encodings = face_recognition.face_encodings(img)
known_names.append(basename)
known_face_encodings.append(encodings[0])
return known_names, known_face_encodings
# Multiple Image read from Folder
path = os.path.join("image/", '*g')
known_people_folder = glob.glob(path)
known_face_names, known_face_encodings = scan_known_people(known_people_folder)
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
count = time.strftime("%Y%m%d-%H%M%S")
flag = 0 # Identification Value
frame_image = face_recognition.load_image_file('./images/'+username+'.jpeg')
PATH_IMAGES_DIR = './data/' + username + '/'
try:
os.mkdir(PATH_IMAGES_DIR)
except FileExistsError:
pass
cv2.imwrite(PATH_IMAGES_DIR + str(count) + ".jpg", frame_image)
# Only process every other frame of video to save time
if process_this_frame:
flag = 0
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(frame_image)
try:
if len(face_locations) < 1:
                string = 'No face found! Please come closer.'
print("Found {0} faces!".format(len(face_locations)))
except:
print("Error flash message")
face_encodings = face_recognition.face_encodings(frame_image, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.55)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
flag = 1
face_names.append(name)
print("Face Identified Value: " + str(flag) + ' name ' + name)
#distance = face_recognition.face_distance(known_face_encodings, face_encoding)
#print("distance of frame from camera: {}.".format(distance))
process_this_frame = not process_this_frame
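# Editor's usage sketch (assumption): the function is keyed by the image filename,
# so a caller would pass the name that matches a file in the known-people folder:
#
#   facerecognitionFunction('alice')   # hypothetical username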
|
[
"istiakahmad86@gmail.com"
] |
istiakahmad86@gmail.com
|
675216a738395696e32032cdaf76581e4a9e6f05
|
79de4db43dc6b1503b21927a6b019f44e6aec31e
|
/src/git_reports/analyser.py
|
42248ec3dde4a121257e666d74ff8e6941970af3
|
[
"MIT"
] |
permissive
|
premchalmeti/git-reports
|
608d7b00092dac643af4c13af4158140a4492c19
|
ed73bf9b4586b990e9792991dbae537aa8d7ad5c
|
refs/heads/master
| 2023-04-03T13:10:37.806117
| 2021-04-11T18:44:14
| 2021-04-11T18:44:14
| 356,949,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,820
|
py
|
#!/usr/bin/python
"""
The analyser is the entrypoint for the analysis
>>> analyser = GitAnalyser('repos/django')
>>> analyser.analyse()
>>> analyser.output_html('django_analysis.html')
"""
import git
import datetime as dt
import argparse
import json
__version__ = '0.0.1'
class AnalysisEmptyError(Exception):
pass
class GitAnalyser:
# DATE_FMT to be used for x-axis timeline in chart
DATE_FMT = '%b-%Y'
TEMPLATE_FILE = 'chart_template.html'
DATE_RANGE_FMT = '%d/%m/%Y'
def __init__(self, repo_path):
self.repo = git.Repo(repo_path)
self.filter_date_flag = False
self.start_date = dt.date(year=2005, month=1, day=1)
self.end_date = dt.date(year=2006, month=8, day=1)
self.filter_authors_flag = False
self.target_authors = []
self.cmt_dates = []
self.cmt_info = {}
self.final_graph_data = {}
self._tracked_commits = []
def set_date_range(self, start_date_str, end_date_str):
self.start_date = dt.datetime.strptime(start_date_str, self.DATE_RANGE_FMT).date()
self.end_date = dt.datetime.strptime(end_date_str, self.DATE_RANGE_FMT).date()
def _add_cmt_info(self, author, date_str, cmt_obj):
stats = cmt_obj.stats.total
insertions = stats.get('insertions', 0)
deletions = stats.get('deletions', 0)
# author-wise mapping
if author not in self.cmt_info:
self.cmt_info[author] = {}
if date_str not in self.cmt_info[author]:
self.cmt_info[author][date_str] = {
"commits": 1,
"insertions": insertions,
"deletions": deletions
}
else:
self.cmt_info[author][date_str]['commits'] += 1
self.cmt_info[author][date_str]['insertions'] += insertions
self.cmt_info[author][date_str]['deletions'] += deletions
def _is_analysis_empty(self):
return not self.cmt_info
def analyse_branch(self, branch):
for cmt in self.repo.iter_commits(rev=branch):
author = cmt.author.email
if self.filter_authors_flag and self.target_authors and author not in self.target_authors:
continue
cmt_date = cmt.committed_datetime.date()
if self.filter_date_flag and not (self.start_date <= cmt_date <= self.end_date):
continue
cmt_date_str = cmt_date.strftime(self.DATE_FMT)
if cmt.hexsha not in self._tracked_commits:
self._tracked_commits.append(cmt.hexsha)
self._add_cmt_info(author, cmt_date_str, cmt)
if cmt_date_str not in self.cmt_dates:
self.cmt_dates.append(cmt_date_str)
def analyse(self):
# iterate over all branch's commits and analyse commits
for (i, ref) in enumerate(self.repo.refs):
print(f'{i+1}/{len(self.repo.refs)}. Checking commits for {ref}')
self.analyse_branch(ref)
# sorted_dates = list(self.cmt_dates)
# sorted_dates.sort(key=lambda d: dt.datetime.strptime(d, self.DATE_FMT))
self.prepare_graph_data()
def prepare_graph_data(self):
if self._is_analysis_empty():
raise AnalysisEmptyError("No analysis data found")
series_data = []
authors = []
for (author, ci) in self.cmt_info.items():
authors.append(author)
commit_data = [
{
"name": "Commits",
"value": ci.get(cmt_date, {}).get('commits', 0),
"commits": ci.get(cmt_date, {}).get('commits', 0),
"insertions": ci.get(cmt_date, {}).get('insertions', 0),
"deletions": ci.get(cmt_date, {}).get('deletions', 0),
} for cmt_date in self.cmt_dates[::-1]
]
# use {}.update in javascript and only update name and data
series_data.append({
"name": author,
"type": 'bar',
"barGap": 0,
"emphasis": {
"focus": 'series'
},
"data": commit_data
})
self.final_graph_data = {
"title": f'"{self.cmt_dates[0]} - {self.cmt_dates[-1]} Commits"',
"legend_data": authors,
"xaxis_data": self.cmt_dates,
"series_data": json.dumps(series_data, indent=4)
}
def output_html(self, outfile):
print(f'Generating {outfile} file')
from jinja2 import Template
with open(self.TEMPLATE_FILE) as fd:
rendered_chart_file = Template(fd.read()).render(chart_data=self.final_graph_data)
with open(outfile, 'w') as fd:
fd.write(rendered_chart_file)
def output_json(self):
raise NotImplementedError("This function is not implemented yet")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Analyse repository and generate analysis report in "+
"given html file"
)
parser.add_argument(
"repo_path",
help="Path of the git repository to use for analysis"
)
parser.add_argument(
"--out-file",
required=False,
help="The output file name",
default='git_analysis.html'
)
args = parser.parse_args()
try:
analyser = GitAnalyser(args.repo_path)
analyser.analyse()
analyser.output_html(args.out_file)
print('Done!')
except AnalysisEmptyError as exc:
print('[ ERROR ]', exc)
except git.exc.NoSuchPathError as exc:
print('[ ERROR ] Invalid Repository Path', args.repo_path)
except FileNotFoundError as exc:
print('[ ERROR ]', exc)
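# Editor's sketch (assumption, using only attributes defined above): enabling the
# date and author filters before running the analysis.
#
#   analyser = GitAnalyser('repos/django')
#   analyser.filter_date_flag = True
#   analyser.set_date_range('01/01/2020', '31/12/2020')
#   analyser.filter_authors_flag = True
#   analyser.target_authors = ['dev@example.com']  # hypothetical author email
#   analyser.analyse()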
|
[
"premkumarchalmeti@gmail.com"
] |
premkumarchalmeti@gmail.com
|
4ca7a7a20e0f8722c532d08c2f10d8e3d551d3de
|
25743fdde4ffe3c09c2b38af42f6629018e2e024
|
/06-ClassesAndObjects/zad12.py
|
e8f0d1a137800cecf05124530ed4950413494ad5
|
[] |
no_license
|
kacper26/pp1
|
4f257a97e64a1479e945ac23b89edd56e97fa270
|
9c21c181fba5085f9c6b5a06c113f4cf0664ec0d
|
refs/heads/master
| 2020-09-14T02:03:02.944766
| 2020-01-31T22:49:24
| 2020-01-31T22:49:24
| 222,934,969
| 0
| 0
| null | 2019-11-20T12:42:23
| 2019-11-20T12:42:22
| null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
class telewizory():
def __init__(self):
self.is_on = False
self.channel_no = 1
    def set_channel(self, channel_no):
        self.channel_no = channel_no
def on(self):
self.is_on = True
def off(self):
self.is_on = False
def show_status(self):
if self.is_on:
print(f'Telewizor jest załączony, kanał {self.channel_no}')
else:
print('Telewizor nie jest załączony')
t1 = telewizory()
t1.show_status()
t1.on()
t1.show_status()
t1.off()
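# Editor's usage sketch for the reconstructed set_channel() above (assumption):
#
#   t1.set_channel(5)
#   t1.on()
#   t1.show_status()   # -> Telewizor jest załączony, kanał 5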
|
[
"noreply@github.com"
] |
noreply@github.com
|
57a7975749775a7b7ebf56afc6c8a8f39f7abcc6
|
4fd86f15b85f03102975d508944ac331e26e1d07
|
/first/settings.py
|
409b98e7205f8e589a0be4889e9801f4ef19d7de
|
[] |
no_license
|
doha22/user-profile-API-
|
096e25b066646f090c8a14bfba197b79ba694496
|
364f3faaa86722efe536340dcfb0a77ac5d31dad
|
refs/heads/master
| 2020-06-12T19:24:59.306578
| 2019-09-08T21:17:26
| 2019-09-08T21:17:26
| 194,401,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,379
|
py
|
"""
Django settings for first project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n9qkny5pez-^ur9g^lqr-828okz8j-gf63rqar!08*4rr2arp%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework.authtoken',
'rest_framework',
'quickstart'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'first.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'first.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
### write which user model will be used
AUTH_USER_MODEL = 'quickstart.UserProfile'
|
[
"noreply@github.com"
] |
noreply@github.com
|
c0f033a974bd1edfd8566b7f3bfe0a207bcaa701
|
9f954f8f194610431af79caee31f405289a0400e
|
/MongoDb/import_file_mongo.py
|
f92ecb952c8686d7bb03c9361ba4c6e6cbf346ed
|
[] |
no_license
|
sahilchilana/Innovaccer-elasticsearch-
|
c43f06e5a868ad531eb9758f2fab395bee390d8a
|
3bdf20254e3ea3a590856d734e10a88eabd2f6f8
|
refs/heads/master
| 2020-05-26T00:42:30.156986
| 2019-05-24T08:56:10
| 2019-05-24T08:56:10
| 188,053,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
import json
import pymongo
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["mydatabase"]
mycol = mydb["config_file"]
with open('config.json') as f:
file_data = json.load(f)
mycol.insert_one(file_data)
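# Editor's sketch (assumption): verify the insert by reading the document back.
#
#   print(mycol.find_one())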
|
[
"noreply@github.com"
] |
noreply@github.com
|
58a5ffe0456fe028034da211b3db8c3daf7f4530
|
7642f70954b73aca0d56f03b3e3577ee5648c752
|
/ppm/settings.py
|
5bfa923c2b43f7b3d5b3ae1d8a2264a866af4505
|
[] |
no_license
|
alviandk/ppm
|
8e5dfb2ca9a98b460c9b0a71be68b5310ed56d87
|
eea4d37904f86b4ec9cded6091b89d18244b85a9
|
refs/heads/master
| 2021-01-10T21:05:22.931101
| 2014-11-13T09:24:36
| 2014-11-13T09:24:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,100
|
py
|
"""
Django settings for ppm project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_v)b*pi3yhflh(bvrrk+rq9*fm5=b+@yh03bdgb94h95+1=#w-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'inventory',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ppm.urls'
WSGI_APPLICATION = 'ppm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'inventory',
'USER' : 'root',
'PASSWORD' : '',
'HOST': '127.0.0.1',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
[
"alviandk@gmail.com"
] |
alviandk@gmail.com
|
a9973f9c03c4beed2ba9fd4aba1aa4f7f968f84e
|
a3934f43f3c1e0a332eba3c2dd648f5b02cf7be7
|
/StockAlert.py
|
04f58f777bb49c4d61ef3e6428a91e44ab3dd468
|
[] |
no_license
|
harpaulgill/stock-alert-app
|
457777d9162dd5fec457b3cd28cc77765239a848
|
37ce470b051812c63b0a6b272d020251bb67c085
|
refs/heads/master
| 2020-03-26T21:09:51.368071
| 2018-08-20T05:53:53
| 2018-08-20T05:53:53
| 145,370,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,748
|
py
|
from apiclient import discovery
from datetime import date
import httplib2
import requests
import schedule
import time
import Auth
import SendEmail
email = "Your email goes here"
apikey = 'Your API Key from Alpha Vantage goes here'
SCOPES = 'https://mail.google.com/'
# get a credentials.json file after making a new project on the google developer console
CLIENT_SECRET_FILE = 'credentials.json'
APPLICATION_NAME = 'StockAlert'
authInst = Auth.Auth(SCOPES, CLIENT_SECRET_FILE, APPLICATION_NAME)
credentials = authInst.get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
def get_labels():
results = service.users().labels().list(userId='me').execute()
labels = results.get('labels', [])
if not labels:
print('No labels found.')
else:
print('Labels:')
for label in labels:
print(label['name'])
def send_email(symbol, price, sma):
send_inst = SendEmail.SendEmail(service)
text = 'The price of {0} is {1} and has fallen below its 200 day simple moving average of {2}'.format(symbol, price, sma)
message = send_inst.create_message(email, email, 'Stock Alert', text)
send_inst.send_message('me', message)
return
def get_daily_price(func, symbol, outputsize, today):
    payload = {'function': func, 'symbol': symbol, 'outputsize': outputsize, 'apikey': apikey}
r = requests.get('https://www.alphavantage.co/query', params=payload)
parsed_json = r.json()
parsed_json = parsed_json['Time Series (Daily)'][today]['4. close']
return float(parsed_json)
def get_sma(func, symbol, interval, time_period, series_type, today):
payload = {'function': func, 'symbol': symbol, 'interval': interval, 'time_period': time_period,
'series_type': series_type, 'apikey': apikey}
r = requests.get('https://www.alphavantage.co/query', params=payload)
parsed_json = r.json()
parsed_json = parsed_json['Technical Analysis: SMA'][today]['SMA']
return float(parsed_json)
def check_price(symbol):
today = str(date.today())
sma = get_sma('SMA', symbol, 'daily', '200', 'close', today)
price = get_daily_price('TIME_SERIES_DAILY', symbol, 'compact', today)
if price < sma:
send_email(symbol, price, sma)
return
def stock_alert():
    # use this function to run check_price with multiple different stocks
# also ensure this program will not run on the weekends when the markets are closed
    if date.today().weekday() < 5:  # weekday() is Mon=0 .. Sun=6, so 5 and 6 are the weekend
check_price('SPY')
check_price('DIA')
return
schedule.every().day.at("17:30").do(stock_alert)
while True:
schedule.run_pending()
time.sleep(10)
print('loop')
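# Editor's note (sketch): get_daily_price() assumes the Alpha Vantage response shape
#
#   {"Time Series (Daily)": {"2018-08-20": {"4. close": "285.06", ...}, ...}}
#
# so the returned float is that day's closing price. The date and price shown here
# are illustrative values only.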
|
[
"harpaul_gill@hotmail.com"
] |
harpaul_gill@hotmail.com
|
a4dc197a980ff50aca387fdbd9c00f95a18b791e
|
b4ae43fc09d9cd6dfa415ecc5ed133b26e14f59f
|
/problems/975_Odd Even Jump.py
|
7faac1e1e288b22c21df3897c49be13a88e925ae
|
[] |
no_license
|
wallinslax/leetcode
|
aa1da5bfb256d50e6499666c3ede988abb3d911c
|
a0263e3f63505fc34d885be37afa0e5b375d811f
|
refs/heads/main
| 2023-04-01T15:27:34.407028
| 2021-04-17T12:09:34
| 2021-04-17T12:09:34
| 358,838,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
from typing import List


class Solution:
def jumpForward(self,arr,whereNowIdx,currentJumpIdx):
isNext = False
if currentJumpIdx%2 == 1: #Odd Jump: Find smallest bigger
#print('Odd Jump=',currentJumpIdx,'/whereNowIdx=',whereNowIdx)
minBigVal = max(arr[(whereNowIdx+1):])
minBigIdx = (whereNowIdx+1) + arr[(whereNowIdx+1):].index(minBigVal)
if arr[whereNowIdx] <= minBigVal:
isNext = True
for j in range(whereNowIdx+1,len(arr)):
if arr[whereNowIdx] <= arr[j] < minBigVal:
minBigVal = arr[j]
minBigIdx = j
#print('minBigIdx=',minBigIdx)
#print('minBigVal=',minBigVal)
whereNowIdx = minBigIdx
#else:
# print('no Next, isNext=',isNext)
else: # Even Jump: Find Largest smaller
#print('Even Jump',currentJumpIdx,'/whereNowIdx=',whereNowIdx)
maxSmlVal = min(arr[(whereNowIdx+1):])
maxSmlIdx = (whereNowIdx+1) + arr[(whereNowIdx+1):].index(maxSmlVal)
if arr[whereNowIdx] >= maxSmlVal:
isNext = True
for j in range(whereNowIdx+1,len(arr)):
if arr[whereNowIdx] >= arr[j] > maxSmlVal:
maxSmlVal = arr[j]
maxSmlIdx = j
#print('maxSmlIdx=',maxSmlIdx)
#print('maxSmlVal=',maxSmlVal)
whereNowIdx = maxSmlIdx
#else:
# print('no Next, isNext=',isNext)
return whereNowIdx, isNext
def oddEvenJumps(self, arr: List[int]) -> int:
goodIdx = [len(arr)-1]
for evaluatingIdx in range(len(arr)-1):
# Initialize Index
whereNowIdx = evaluatingIdx
currentJumpIdx = 1
isNext = True
# Evaluate if evaluatingIdx is Good
while whereNowIdx < len(arr) and isNext:
whereNowIdx, isNext = self.jumpForward(arr,whereNowIdx,currentJumpIdx)
if whereNowIdx == len(arr)-1:
goodIdx.append(evaluatingIdx)
break
else:
currentJumpIdx += 1
#print('goodIdx=',goodIdx)
return len(goodIdx)
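# Editor's usage sketch: LeetCode 975's first example, with the expected result.
#
#   Solution().oddEvenJumps([10, 13, 12, 14, 15])   # -> 2 (indices 3 and 4 are good)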
|
[
"wallinslax@gmail.com"
] |
wallinslax@gmail.com
|
6a1f5cbfcf97bde6ef77c7abcaba2e04ef4898b1
|
d3c4a67dd0c4c6d715c6fbb594cfb18f3b2b9e16
|
/mdds_project/utils/extract_features_from_pdf.py
|
92eefefea41c9ff60fe1d7544699f44b65d64d30
|
[] |
no_license
|
wangyy010/mdds_project
|
1570f8d9206d9499401c0b56764395dce2b0c400
|
0d8df14780a633e116823a88703064f4f362b61d
|
refs/heads/master
| 2023-07-30T00:51:22.721334
| 2021-09-13T03:21:30
| 2021-09-13T03:21:30
| 405,816,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,691
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a simple file to extract features from pdfs,
and record them in a file
"""
import env
import mdds.detect_engines.pdf.lib.get_pdf_features as gpf
from mdds.lib.fileType import get_type
import os
import sys
import glob
import traceback
'''
def main(files, des):
for file in files:
try:
records = gpf.get_pdf_features(file)
gpf.record_features_to_file(records, des)
except:
traceback.print_exc()
'''
### RAISE_FILE = r'F:\MyFinalWork\Samples\Raise_Error_Samples\cve_2013_0640'
def main(dir, des):
for root, dirs, files in os.walk(dir):
for file in files:
filename = os.path.join(root, file)
print 'check file: %s' %(filename)
### gpf.pdf.logging.debug('check file: %s' %(filename))
try:
file_type, data = get_type(filename)
records = gpf.get_pdf_features(data, filename)
gpf.record_features_to_file(records, des)
except:
traceback.print_exc()
print 'pdf: %s error' %(filename)
### gpf.pdf.logging.debug('pdf: %s error' %(filename))
raise
### raise_file = os.path.join(RAISE_FILE, file)
### os.rename(filename, raise_file)
if __name__ == '__main__':
sour_dir = sys.argv[1]
des = sys.argv[2]
main(sour_dir, des)
'''
if os.path.isdir(sour_dir):
main(glob.glob(sour_dir+'\\*'), des)
elif os.path.isfile(sour_dir):
main(glob.glob(sour_dir), des)
else:
print sour_dir
'''
|
[
"915396549@qq.com"
] |
915396549@qq.com
|
6e612a774a20e51feed223e0a74a18ebcf53f4a2
|
76fa4bc242502bcd9dfe1053c964318b94acc6d8
|
/matplotlib bar chart/df_barplot.py
|
fc8ef89725b545217214b8af713ce4b4e05eb56a
|
[] |
no_license
|
phani-1995/Week3-python_libraries
|
720156098ccab5301a58e39a4dd7af5a19a08008
|
1347b8dfd4980b37471a54ce991c967fdcb32e2b
|
refs/heads/master
| 2021-04-01T17:42:54.855954
| 2020-03-23T06:50:18
| 2020-03-23T06:50:18
| 248,204,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data=np.array([[2,4,6,8,10],[4,2,4,2,2],[8,3,7,6,4],[5,4,4,4,3],[6,6,8,6,2]])
dataFrame=pd.DataFrame(data,columns=['a','b','c','d','e'], index=["Delhi",'Mumbai','Hyderabad','Pune','Bengaluru'])
dataFrame.plot(kind='bar')
plt.show()
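# Editor's sketch (assumption): the same frame renders as a stacked bar chart by
# passing stacked=True, which pandas forwards to matplotlib.
#
#   dataFrame.plot(kind='bar', stacked=True)
#   plt.show()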
|
[
"phanindrajallavaram@gmail.com"
] |
phanindrajallavaram@gmail.com
|
fc0b1a61451fe1c4b893d8ea586e3c6d8e04d357
|
7b2a3ea853dc44aea204f02abedaad6a2029f4ff
|
/inv_test.py
|
46e208002c5331c95094449e682798e59a78e53a
|
[] |
no_license
|
NoisyLeon/SW4Py
|
7d45503282dc988b5f886c039706bd79fdd6b339
|
7029f18eb526bcb46b4aa244da1e088ca57a56aa
|
refs/heads/master
| 2020-12-22T14:57:11.265397
| 2016-12-20T18:27:18
| 2016-12-20T18:27:18
| 56,792,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
# import obspy
#
# net=obspy.core.inventory.network.Network('SW4', ftanparams=[])
# inv=obspy.core.inventory.inventory.Inventory(networks=[net],source='CU')
# sta=obspy.core.inventory.ftanparam.Station('aa',13,132.4214,0.0)
|
[
"lili.feng@colorado.edu"
] |
lili.feng@colorado.edu
|
80de0af3010fdd6ae8448a8c6658966704466678
|
30a08a021d019d96734308cfffc7923c59817ceb
|
/CMPE 321 Introduction to Database Systems/project 3 - DBtfiy Song Management App/routes/artist_route.py
|
aabef05a33f77418e0adffd4ec7d1d02f8dbe5f0
|
[] |
no_license
|
UkcaGreen/boun-university-projects
|
8b790b509f6e875e2418ebae4d2c29e505b22539
|
1d5fe0e514ed682bac7aff56524d362dc73da1f4
|
refs/heads/master
| 2023-01-06T01:39:58.067671
| 2020-11-09T19:40:35
| 2020-11-09T19:40:35
| 279,275,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
from flask import Blueprint, request, jsonify, redirect, url_for, session
from services.artist_service import ArtistService
artist_bp = Blueprint("artist_bp", __name__)
@artist_bp.route('/list', methods=["GET"])
def list():
return jsonify(ArtistService().list())
@artist_bp.route('/create', methods=["GET"])
def create():
context = request.args
return ArtistService().create(context)
@artist_bp.route('/delete', methods=["GET"])
def delete():
return ArtistService().delete()
@artist_bp.route('/login', methods=["POST"])
def login():
form = request.form
artist_info = ArtistService().login(form)
if artist_info is not None:
session["id"] = artist_info["id"]
session["name"] = artist_info["name"]
session["surname"] = artist_info["surname"]
session["type"] = "artist"
session["state"] = True
return redirect(url_for('index'))
else:
return redirect(url_for('login'))
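# Editor's sketch (assumption): the blueprint is meant to be mounted on the app,
# typically with a prefix, e.g.:
#
#   app.register_blueprint(artist_bp, url_prefix='/artist')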
|
[
"emilcan.arican@boun.edu.tr"
] |
emilcan.arican@boun.edu.tr
|
e283ce5088a7510584c25bdf3ab120ce9c5c0d0d
|
bf75b5c5d1c984dec08052d40a6dc524294767c8
|
/bookms/settings.py
|
763ccfef3acd9bc723f045a743aaf2b757298cea
|
[] |
no_license
|
kqjhuang/bookms
|
5a40cf37fb4045526824c71672436731286d38d8
|
f33f0695e628a4e7b44eba5f41bb8dbdc97ad363
|
refs/heads/master
| 2021-01-10T07:59:40.446030
| 2015-11-07T10:27:03
| 2015-11-07T10:27:03
| 45,731,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,813
|
py
|
"""
Django settings for bookms project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yh3(yn^)ly+t01tr*i*gtj_drm8o0fpnt^_1ge+t*1p*98p_vq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookapp',
'registDemo',
    'groundwork',  # assumed app label; 'django-groundwork' contains a hyphen and is not an importable module path
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'bookms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mytest',
'USER': 'root',
'PASSWORD': '',
'HOST': '',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-CN'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
[
"252331757@qq.com"
] |
252331757@qq.com
|
bce61880051656c6001a8447aca35b0b1e98cdcc
|
dc52081d4b06729731d048f7ede069950dbc00ed
|
/common_words.py
|
282cb4345619e05a1406fa41da737d760b578659
|
[] |
no_license
|
gigimic/topic_modeling
|
a868975fabfaba7194e72da051faffda8d4c5bb7
|
96c83b690890d1c4c91b1b004ec4f48fcb142bf1
|
refs/heads/master
| 2023-07-25T21:02:35.898534
| 2021-09-06T06:38:30
| 2021-09-06T06:38:30
| 286,437,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
import pandas as pd
from collections import Counter
def most_common_words(data_dtm):
data_dtm = data_dtm.transpose()
top_dict = {}
for c in data_dtm.columns:
top = data_dtm[c].sort_values(ascending=False).head(20)
top_dict[c]= list(zip(top.index, top.values))
# print('printing top words...')
# print(top_dict[c])
# print(top_dict)
# Print the top 10 words from each text
for topic, top_words in top_dict.items():
print(topic)
print(', '.join([word for word, count in top_words[0:9]]))
print('---')
# If there are common words in all the topics which appear many times they can be removed.
# Look at the most common top words --> add them to the stop word list
# Let's first pull out the top 10 words for each topic
words = []
for topic in data_dtm.columns:
top = [word for (word, count) in top_dict[topic]]
for t in top:
words.append(t)
# print(words)
if __name__ == "__main__":
import doctest
doctest.testmod()
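# Editor's sketch (assumption): most_common_words() expects a document-term matrix
# with one row per document and one column per term, e.g. built with scikit-learn:
#
#   from sklearn.feature_extraction.text import CountVectorizer
#   cv = CountVectorizer(stop_words='english')
#   counts = cv.fit_transform(docs)   # docs: hypothetical list of raw texts
#   data_dtm = pd.DataFrame(counts.toarray(), columns=cv.get_feature_names_out())
#   most_common_words(data_dtm)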
|
[
"gigimic@gmail.com"
] |
gigimic@gmail.com
|
d2f2f2dab7de0ca51232c4c3f8729a4cfc3ff734
|
ad3b09848dcec84db8dbd16fa1b4d4838551d180
|
/app.py
|
680a6ffb78184b1ceb3729394f1e3276132e622e
|
[] |
no_license
|
Vincentzyx/simple-video-website-backend
|
f00a7e85bf9cc55648a39302af5e2922400510ec
|
4dc8e58f7345de58cece6f9e83ce7aa44c12cfdd
|
refs/heads/master
| 2023-05-13T01:41:13.754072
| 2020-02-16T17:00:26
| 2020-02-16T17:00:26
| 240,929,744
| 1
| 0
| null | 2023-05-01T21:20:16
| 2020-02-16T16:59:31
|
Python
|
UTF-8
|
Python
| false
| false
| 11,641
|
py
|
import os
from flask import Flask, request, session, send_from_directory
from flask_cors import CORS
from functools import wraps
from werkzeug.utils import secure_filename
import json
from DatabaseAPI_User import UserAPI
from DatabaseAPI_Video import VideoAPI
from Exceptions import vException
import Utils
import VideoImportTool
app = Flask(__name__)
app.secret_key = ' A\xcd!x\xa6a\xffS\xcc\xc9\xdf?\x15\xd7\xbb\xdf\x0b\x9f\x1cy\xdcb\x8b'
app.config['UPLOAD_FOLDER'] = 'static/uploads'
CORS(app)
def json_response(func):
@wraps(func)
def decorated(*args, **kwargs) -> str:
res = {
"code": 0,
"msg": ""
}
data = None
try:
data = func(*args, **kwargs)
except vException as e:
res["code"] = e.args[0]
res["msg"] = e.args[1]
if data is not None:
res.update({
"data": data
})
return json.dumps(res, default=str, ensure_ascii=False)
return decorated
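# Illustrative shape of the JSON envelope produced by @json_response
# (example, not in the original source):
#   success -> {"code": 0, "msg": "", "data": <handler return value>}
#   failure -> {"code": <vException code>, "msg": "<vException message>"}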
@app.route("/")
@json_response
def index():
if "account" in session:
return "logged in as " + str(session["account"]["username"])
else:
return "you are not logged in"
def isLogin():
if "account" in session:
return True
else:
return False
@app.route("/register", methods=['GET', 'POST'])
@json_response
def user_register():
if "username" in request.form and "email" in request.form and "password" in request.form:
try:
reg_result = UserAPI.UserRegister(
username=request.form["username"],
email=request.form["email"],
pwd=request.form["password"]
)
except vException as e:
raise e
else:
raise vException(-1, "Invalid args. Arguments: username email password are required.")
@app.route("/login", methods=['GET', 'POST'])
@json_response
def user_login():
if "username" in request.form and "password" in request.form:
try:
login_result = UserAPI.UserLogin(
username=request.form["username"],
pwd=request.form["password"]
)
except vException as e:
raise e
session["account"] = login_result
session["isLogin"] = True
return login_result
else:
raise vException(-1, "Invalid args. Arguments: username password are required.")
@app.route("/logout")
@json_response
def user_logout():
session.clear()
return True
@app.route("/login-state")
@json_response
def get_login_state():
if "account" in session:
return session["account"]
else:
raise vException(-1, "Please login.")
@app.route("/check-usability")
@json_response
def check_usability():
rValue = {}
if "username" in request.args:
userExists = UserAPI.CheckUsernameExists(request.args.get("username"))
rValue.update({
"username": not userExists
})
if "email" in request.args:
emailExists = UserAPI.CheckEmailExists(request.args.get("email"))
rValue.update({
"email": not emailExists
})
return rValue
@app.route("/video-url")
@json_response
def get_video_url():
if "vid" in request.args:
try:
urls = VideoAPI.GetVideoUrls(request.args.get("vid"))
urlsOut = {}
for url in urls:
urlsOut.update({url.name: url.url})
return urlsOut
except vException as e:
raise e
else:
raise vException(-1, "Invalid args. Argument: Vid is required.")
@app.route("/video-thumbnail")
@json_response
def get_video_thumbnail():
if "vid" in request.args:
thumbnail = VideoAPI.GetVideoThumbnail(request.args.get("vid"))
return thumbnail
else:
raise vException(-1, "Invalid args. Argument: Vid is required")
@app.route("/video-list")
@json_response
def get_video_list():
count = 20
page = 1
if "count" in request.args:
count = request.args.get("count")
if "page" in request.args:
page = request.args.get("page")
data = VideoAPI.GetVideoList(count, page)
infoOut = []
for info in data:
infoDict = info.toDict()
author = UserAPI.GetUserInfo(infoDict["author"])
videoThumbnail = VideoAPI.GetVideoThumbnail(info.vid)
infoDict.update({
"thumbnail": videoThumbnail["url"],
"author": author
})
infoOut.append(infoDict)
return infoOut
@app.route("/video-info")
@json_response
def get_video_info():
if "vid" in request.args:
data = VideoAPI.GetVideoInfo(request.args["vid"])
try:
videoThumbnail = VideoAPI.GetVideoThumbnail(data.vid)
except vException as ex:
            videoThumbnail = {"url": None}  # fallback key so the update below doesn't raise KeyError
videoUrls = VideoAPI.GetVideoUrls(data.vid)
authorInfo = UserAPI.GetUserInfo(data.author)
authorInfo.pop("email")
followState = False
likeState = False
starState = False
if "account" in session:
uid_str = str(session["account"]["uid"])
vid_str = str(data.vid)
followState = UserAPI.CheckFollowState(uid_str, str(authorInfo["uid"]))
likeState = VideoAPI.CheckLikeState(uid_str, vid_str)
starState = VideoAPI.CheckStarState(uid_str, vid_str)
data = data.toDict()
urls = []
for url in videoUrls:
urls.append(url.toDict())
authorInfo.update({
"isFollow": followState,
})
data.update({
"thumbnail": videoThumbnail["url"],
"urls": urls,
"author": authorInfo,
"isLike": likeState,
"isStar": starState
})
return data
else:
raise vException(-1, "Invalid args. Argument: Vid is required")
@app.route("/video-play")
@json_response
def video_play():
if "vid" in request.args:
VideoAPI.PlayVideo(request.args.get("vid"))
return True
def allowed_file(filename):
ALLOWED_EXTENSIONS = {'pdf', 'png', 'jpg', 'jpeg', 'gif'}
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/user-info")
@json_response
def user_info():
if "uid" in request.args:
info = UserAPI.GetUserInfo(request.args.get("uid"))
info.pop("email")
followInfo = UserAPI.GetUserFollowCount(request.args.get("uid"))
info.update(
followInfo
)
return info
else:
raise vException(-1, "Invalid args. Argument: Uid is required")
@app.route("/description-modify")
@json_response
def change_description():
if "description" in request.args:
if len(request.args.get("description")) <= 30:
UserAPI.ChangeDescription(str(session["account"]["uid"]), request.args.get("description"))
session["account"]["description"] = request.args.get("description")
return request.args.get("description")
else:
raise vException(-2, "Description is too long")
else:
raise vException(-1, "Invalid args. Argument: description is required")
@app.route('/avatar-upload', methods=['POST'])
@json_response
def upload_avatar():
if "account" in session:
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
raise vException(-1, "File not found")
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
raise vException(-2, "Filename is empty.")
if file and allowed_file(file.filename):
ext = file.filename.split('.')[-1]
random_filename = Utils.gen_str()
file_name = random_filename + "." + ext
file.save(os.path.join("static/avatar", file_name))
UserAPI.ChangeAvatar(str(session["account"]["uid"]), "/avatar/" + file_name)
session["account"]["avatar"] = "/avatar/" + file_name
session.modified = True
return {
"avatar": "/avatar/" + file_name
}
else:
        raise vException(-3, "Please log in.")
@app.route("/user-follow")
@json_response
def user_follow():
if "uid" in request.args and "follow" in request.args:
if request.args.get("follow") == "true":
UserAPI.Follow(str(session["account"]["uid"]), request.args.get("uid"))
else:
UserAPI.UnFollow(str(session["account"]["uid"]), request.args.get("uid"))
else:
raise vException(-1, "Invalid args. Argument: uid, follow is required")
@app.route("/user-like")
@json_response
def user_like():
if "vid" in request.args and "like" in request.args:
if request.args.get("like") == "true":
VideoAPI.Like(str(session["account"]["uid"]), request.args.get("vid"))
else:
VideoAPI.UnLike(str(session["account"]["uid"]), request.args.get("vid"))
else:
raise vException(-1, "Invalid args. Argument: vid, like is required")
@app.route("/user-star")
@json_response
def user_star():
if "vid" in request.args and "star" in request.args:
if request.args.get("star") == "true":
VideoAPI.Star(str(session["account"]["uid"]), request.args.get("vid"))
else:
VideoAPI.UnStar(str(session["account"]["uid"]), request.args.get("vid"))
else:
raise vException(-1, "Invalid args. Argument: vid, star is required")
@app.route("/video-comments")
@json_response
def show_comments():
count = "30"
page = "1"
if "vid" in request.args:
if "count" in request.args:
if request.args.get("count").isdecimal():
count = request.args.get("count")
if "page" in request.args:
if request.args.get("page").isdecimal():
page = request.args.get("page")
commentsInfo = VideoAPI.ShowComment(request.args.get("vid"), count, page)
comments = []
for c in commentsInfo:
comment = c
userInfo = UserAPI.GetUserInfo(str(c["uid"]))
userInfo.pop("email")
comment.update({
"user": userInfo
})
comments.append(comment)
return comments
else:
raise vException(-1, "Invalid args. Argument: vid is required")
@app.route("/send-comment")
@json_response
def add_comment():
if "account" in session:
if "vid" in request.args and "text" in request.args:
if len(request.args.get("text")) <= 300:
VideoAPI.AddComment(request.args.get("vid"), str(session["account"]["uid"]), "-1", request.args.get("text"))
else:
raise vException(-3, "Comment is too long")
else:
raise vException(-2, "Invalid args. Arguments: vid, text is required")
else:
raise vException(-1, "Please login")
@app.route("/videos/<path:path>")
def send_video(path):
return send_from_directory("static/videos", path)
@app.route("/video_thumbnail/<path:path>")
def send_video_thumbnail(path):
return send_from_directory("static/video_thumbnail", path)
@app.route("/avatar/<path:path>")
def send_avatar(path):
return send_from_directory("static/avatar", path)
with app.test_request_context():
pass
if __name__ == '__main__':
app.run()
|
[
"929403983@qq.com"
] |
929403983@qq.com
|
ba9fbe6b1b7b02c039189655a0a37f92ab7d2b20
|
74bb22915904542c6297a50ffac9b3e3902941bc
|
/MyPython/somePy/Mygrade.py
|
f2948741ceec568d582b09fab631cc4fabc0a142
|
[] |
no_license
|
ahathe/some-a-small-project
|
405d2ec25c6524b5dcf7df65ab103dad301438b8
|
fdf86c96a302703a0f90a1224374bbcd59524f63
|
refs/heads/master
| 2021-06-25T17:54:57.774366
| 2017-09-05T01:18:11
| 2017-09-05T01:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
#!/usr/bin/env python
'this is a grade counter, haha, lol!'
#score = int(raw_input("please,input you want to view score number!:"))
def View(number):
    if number <= 100 and number >= 90:
        print 'your score is a good A!'
    elif number <= 89 and number >= 80:
        print 'your score is a good B!'
    elif number <= 79 and number >= 70:
        print 'your score is a little better, C!'
    elif number <= 69 and number >= 60:
        print 'your score is bad, D!'
    elif number >= 0 and number <= 59:
        print 'your score is so bad, F!'
    else:
        print 'please input a number between 0~100 to view your score, thank you, and try again!'
while True:
try:
        score = int(raw_input("please input the score number you want to view, between 0~100, thank you!:"))
    except ValueError,e:
        print 'this value is illegal, please input an int() type number, thank you!',e
else:
View(score)
break
|
[
"1136334598@qq.com"
] |
1136334598@qq.com
|
a2a7ac956af02e266ad5286d8ce10398186260cf
|
acaa2ed02339db8bf628c02a29146d2d9f699f3b
|
/W17B-Git_Revue/src/error.py
|
aa3a68aacf88e66490708e4ecfa4fd7c718b60af
|
[] |
no_license
|
jiaqizhu22/COMP1531
|
5c87411ba7ab9d3af597e6db852fc7d4a73109f9
|
29984ba66ad10a621519850537c2908d9d840384
|
refs/heads/master
| 2023-01-12T20:43:43.652844
| 2020-11-20T19:46:27
| 2020-11-20T19:46:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#pylint: disable = missing-docstring
from werkzeug.exceptions import HTTPException
class AccessError(HTTPException):
code = 400
message = 'No message specified'
class InputError(HTTPException):
code = 400
message = 'No message specified'
|
[
"noreply@github.com"
] |
noreply@github.com
|
3e99c5d34a018a27eeab7d2575cd258a0647714d
|
96ead5ea87bd855a414244c74a9332377209615f
|
/app.py
|
976508d1cab1a079e1c725f0c77dcde6bac6fa3f
|
[] |
no_license
|
remi95/Http-Status
|
84356e739f7bb4f2234d078a3deff3c825477874
|
93592b50e4c0e1177f05650d5f3e8e920166ad89
|
refs/heads/master
| 2020-03-09T07:27:44.394793
| 2018-04-14T20:54:48
| 2018-04-14T20:54:48
| 128,665,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,536
|
py
|
#!/usr/bin/python3.5
# -*- coding:utf-8 -*-
from flask import Flask, flash, render_template, request, g, session, redirect, url_for
import mysql.connector, hashlib, urllib, datetime, time, os, telegram
from mysql.connector import Error
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from slackclient import SlackClient
app = Flask(__name__)
app.config.from_object('secret_config')
# --------------------------------------------------
# ------------------- Functions --------------------
# --------------------------------------------------
def connect_db () :
g.mysql_connection = mysql.connector.connect(
host = app.config['DATABASE_HOST'],
user = app.config['DATABASE_USER'],
password = app.config['DATABASE_PASSWORD'],
database = app.config['DATABASE_NAME']
)
g.mysql_cursor = g.mysql_connection.cursor()
return g.mysql_cursor
def get_db () :
if not hasattr(g, 'db') :
g.db = connect_db()
return g.db
def newConnectDb() :
mysql_connection = mysql.connector.connect(
host = app.config['DATABASE_HOST'],
user = app.config['DATABASE_USER'],
password = app.config['DATABASE_PASSWORD'],
database = app.config['DATABASE_NAME']
)
return mysql_connection
def getHttpCode (url) :
    try :
        response = urllib.request.urlopen(url)  # renamed from `request` to avoid shadowing flask.request
        return response.getcode()
    except urllib.error.HTTPError as error :
        return error.code
    except urllib.error.URLError as error :
        return error.reason
def getStatus(sites) :
status = []
for site in sites :
status.append(getHttpCode(site[1]))
return status
def insertStatusCode () :
db = newConnectDb()
cursor = db.cursor()
cursor.execute("SELECT id, url FROM sites")
sites = cursor.fetchall()
date = datetime.datetime.now()
for site in sites:
code = getHttpCode(site[1])
sql = "INSERT INTO status VALUES(DEFAULT, %s, %s, %s)"
cursor.execute(sql, (site[0], code, date))
db.commit()
if code != 200 and isDown(site[0], 3) :
sendMessages(site[0], code)
cursor.close()
db.close()
def sendMessages(siteId, code, antiSpam = True) :
    db = newConnectDb()
    cursor = db.cursor()
    cursor.execute("SELECT * FROM sites WHERE id = %(siteId)s", ({'siteId': siteId}))
    data = cursor.fetchone()
    now = datetime.datetime.now()
    if antiSpam :
        hours = None
        if data[2] is not None :
            diff = now - data[2]
            days, seconds = diff.days, diff.seconds
            hours = days * 24 + seconds // 3600
        # Only alert when no message was ever sent, or the last one is at least 2 hours old
        if data[2] is None or hours >= 2 :
            sendSlackMessage(data[1], code)
            sendTelegramMessage(data[1], code)
            cursor.execute("UPDATE sites SET last_message = %(date)s WHERE id = %(id)s", ({'date': now, 'id': siteId}))
            db.commit()
    else :
        # without anti-spam, always send (the original referenced `data` here before defining it)
        sendSlackMessage(data[1], code)
        sendTelegramMessage(data[1], code)
    cursor.close()
    db.close()
def sendSlackMessage (url, code) :
slack_token = app.config['SLACK_TOKEN']
sc = SlackClient(slack_token)
sc.api_call(
"chat.postMessage",
channel=app.config['SLACK_CHANNEL'],
text="Bonjour, il semblerait qu'il y ai un problème avec votre site "+ url +". Statut : "+ str(code)
)
def sendTelegramMessage (url, code) :
telegram_token = app.config['TELEGRAM_TOKEN']
bot = telegram.Bot(token=telegram_token)
    bot.send_message(chat_id=app.config['TELEGRAM_CHAT_ID'], text="Bonjour, il semblerait qu'il y ait un problème avec votre site "+ url +". Statut : "+ str(code))
def isDown (siteId, limit) :
db = newConnectDb()
cursor = db.cursor()
cursor.execute("SELECT * FROM status WHERE site_id = %(id)s ORDER BY date DESC LIMIT %(limit)s", {'id': siteId, 'limit': limit})
lastStatus = cursor.fetchall()
cursor.close()
db.close()
for status in lastStatus :
if status[2] == 200 :
return False
return True
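# Illustrative (not in the original): isDown(site_id, 3) is True only when none
# of the site's three most recent status rows is HTTP 200, so one failed check
# alone never triggers an alert.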
# Closes the database connections at the end of execution
@app.teardown_appcontext
def close_db (error) :
if hasattr(g, 'db') :
g.db.close()
# --------------------------------------------------
# ---------------------- Routes --------------------
# --------------------------------------------------
@app.route('/')
def index () :
if session.get('user') :
user = session['user']
else :
user = False
db = get_db()
db.execute("SELECT id, url FROM sites")
sites = db.fetchall()
status = getStatus(sites)
return render_template('home.html.j2', user = user, sites = sites, status = status)
@app.route('/login/', methods = ['GET', 'POST'])
def login () :
name = str(request.form.get('name'))
password = str(request.form.get('password'))
db = get_db()
db.execute('SELECT name, password FROM user WHERE name = %(name)s', {'name' : name})
users = db.fetchall()
valid_user = False
for user in users :
if hashlib.sha256(password.encode('ascii')).hexdigest() == user[1]:
valid_user = user
if valid_user :
session['user'] = valid_user
return redirect(url_for('admin'))
return render_template('login.html.j2')
@app.route('/logout/')
def logout () :
session.clear()
return redirect(url_for('index'))
@app.route('/admin/')
def admin () :
if not session.get('user') or not session.get('user')[1] :
return redirect(url_for('login'))
db = get_db()
db.execute("SELECT id, url FROM sites")
sites = db.fetchall()
status = getStatus(sites)
return render_template('admin.html.j2', user = session['user'], sites = sites, status = status)
@app.route('/admin/add/', methods = ['GET', 'POST'])
def add () :
if not session.get('user') or not session.get('user')[1] :
return redirect(url_for('login'))
url = request.form.get('url')
if url is not None:
db = get_db()
if type(getHttpCode(url)) is int :
db.execute("INSERT INTO sites(id, url, last_message) VALUES(DEFAULT, '%s', NULL)"%(str(url)))
g.mysql_connection.commit()
flash("Le site " + url + " a bien été ajouté à la liste")
return redirect(url_for('admin'))
else :
flash("Le site " + url + " ne semble pas exister ou n'autorise pas que vous l'analysiez")
return render_template('add.html.j2', user = session['user'])
@app.route('/admin/edit/<int:id>', methods = ['GET', 'POST'])
def edit (id) :
if not session.get('user') or not session.get('user')[1] :
return redirect(url_for('login'))
db = get_db()
url = request.form.get('url')
db.execute("SELECT url FROM sites WHERE id = %s"%(id))
site = db.fetchone()
if url is None:
if site is not None :
return render_template('edit.html.j2', site = site[0], user = session['user'])
else :
flash("Ce site ne semble pas exister")
return redirect(url_for('admin'))
else:
if type(getHttpCode(url)) is int :
db.execute("UPDATE sites SET url = '%s' WHERE id = %s"%(str(url), id))
g.mysql_connection.commit()
flash("Le site ayant l'id " + str(id) + " a bien été modifié avec la valeur " + url)
return redirect(url_for('admin'))
else :
flash("Le site " + url + " ne semble pas exister ou n'autorise pas que vous l'analysiez")
return render_template('edit.html.j2', site = site[0], user = session['user'])
@app.route('/admin/delete/<int:id>', methods = ['GET', 'POST'])
def delete (id) :
if not session.get('user') or not session.get('user')[1] :
return redirect(url_for('login'))
db = get_db()
db.execute("SELECT * FROM sites WHERE id = %(id)s", ({'id': id}))
site = db.fetchone()
if db.rowcount > 0 :
if request.method == 'POST' :
db.execute("DELETE FROM sites WHERE id = %s"%(id))
g.mysql_connection.commit()
if db.rowcount > 0 :
flash("Le site ayant l'id " + str(id) + " a bien été supprimé")
else :
flash("Le site ayant l'id " + str(id) + " n'a pas pu être supprimé")
return redirect(url_for('admin'))
else :
return render_template('delete.html.j2', user = session['user'], site = site[1])
else :
flash("Le site ayant l'id " + str(id) + " ne semble pas exister")
return redirect(url_for('admin'))
@app.route('/history/<int:id>')
def history (id) :
if session.get('user') :
user = session['user']
else :
user = False
db = get_db()
db.execute("SELECT st.*, si.url FROM status st INNER JOIN sites si ON si.id = st.site_id WHERE site_id = %s ORDER BY date DESC"%(id))
status = db.fetchall()
return render_template('history.html.j2', user = user, status = status)
# Automates the status checks (and everything that follows) in the background
with app.app_context():
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
scheduler = BackgroundScheduler()
scheduler.add_job(
func=insertStatusCode,
trigger=IntervalTrigger(seconds=120),
replace_existing=True,
)
scheduler.start()
print(" * Starting scheduler")
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
[
"remi.mafat@ynov.com"
] |
remi.mafat@ynov.com
|
291010734539e7ab20e1d491367b537ab8e2ee72
|
5ead730e69f1042de1f43ac64e767d6463ffe691
|
/jarvis.py
|
dee14abefbba94a838d574ebb1cbbb1925540856
|
[
"MIT"
] |
permissive
|
ravi9607/basic-program-by-python
|
84258d110cdb9f9f285e93ddac5eb411d60fae77
|
534fc4a4c316ba0b8391f72647c4f9c9d33bb8e6
|
refs/heads/main
| 2022-12-26T05:33:35.965400
| 2020-10-10T13:07:51
| 2020-10-10T13:07:51
| 301,167,757
| 0
| 0
| null | 2020-10-04T15:56:25
| 2020-10-04T15:56:24
| null |
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
# Just A Rather Very Intelligent System(JARVIS)
import pyttsx3
import datetime
import speech_recognition as sr
#import pyaudio
import webbrowser
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
#print(voices[0].id)
engine.setProperty('voice', voices[1].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishme():
currentH = int(datetime.datetime.now().hour)
if currentH >= 0 and currentH < 12:
speak('Good Morning!')
elif currentH >= 12 and currentH < 18:
speak('Good Afternoon!')
else:
speak('Good Evening!')
speak('Hello Sir, I am your digital assistant LARVIS the Lady Jarvis!')
speak('How may I help you?')
def myCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
query = r.recognize_google(audio, language='en-in')
print('User: ' + query + '\n')
except sr.UnknownValueError:
speak('Sorry sir! I didn\'t get that! Try typing the command!')
query = str(input('Command: '))
return query
if __name__ == '__main__':
wishme()
#speak('ravi gupta .. hello sir ')
while True:
        query = myCommand()
query = query.lower()
if 'open youtube' in query:
# speak('okay')
webbrowser.open('www.youtube.com')
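        # Sketch (not in the original): further commands can be handled the same way.
        elif 'open google' in query:
            webbrowser.open('www.google.com')
        elif 'stop' in query or 'exit' in query:
            speak('Goodbye sir!')
            break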
|
[
"noreply@github.com"
] |
noreply@github.com
|
7fd8f206504c97cba0f8df0e0556a9906a09a3ca
|
1aada0d009d1c30e44d94ea37844ed38b9036fb2
|
/core/migrations/0002_tournamentsite_zip.py
|
3a415fcc55556d18f9b89df9ee1b1f85ed078234
|
[
"MIT"
] |
permissive
|
grapesmoker/qtour
|
2c3c5aaaffa6da32826ba94604041fe6e7234973
|
dbb02613475362d9a4c7f86d047ddc012e4d8351
|
refs/heads/master
| 2021-01-20T05:57:09.027596
| 2017-05-28T01:13:34
| 2017-05-28T01:13:34
| 89,826,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-30 03:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tournamentsite',
name='zip',
field=models.CharField(default=None, max_length=5),
preserve_default=False,
),
]
|
[
"grapesmoker@gmail.com"
] |
grapesmoker@gmail.com
|
8f20b2c52795c107419860fdb30820f09b595d1f
|
c71e2628754d182f86d269f0a3dff72ef87ddf3a
|
/chat/urls.py
|
2f418fe5da0ae0c84c858a73bb963faadbe94889
|
[
"MIT"
] |
permissive
|
xiaoqiao99/chat
|
3d838683deab854eb131744033384c73a75d2e4f
|
ca65ed25fbc277828390b890a50ecadf4675cfb4
|
refs/heads/master
| 2022-12-16T06:37:18.945390
| 2019-02-28T14:52:10
| 2019-02-28T14:52:10
| 172,876,427
| 2
| 3
|
MIT
| 2022-12-08T01:03:01
| 2019-02-27T08:37:09
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
from django.urls import path, re_path
from . import views
urlpatterns = [
path("", views.index, name='index'),
re_path("(?P<room_name>[^/]+)/", views.room, name='room'),
]
|
[
"hui.qiao@fir.ai"
] |
hui.qiao@fir.ai
|
825ea6911f74ab95016446105c315c0b28ad6eac
|
f099583724a138138dfbdb1e1202075058fe280d
|
/Society/execution/migrations/0009_guest_special_key.py
|
6c2df0c26b94e27d3a8e44ef7ee5329400035733
|
[] |
no_license
|
Dhruvanshu1775/Societyapp
|
87c8b43a036b70099a1c00df37c818c394d7da29
|
f1dea6cdcf5b1ef771afcb4e70f247a8a9357eeb
|
refs/heads/master
| 2023-08-19T01:20:16.366069
| 2021-10-01T06:17:10
| 2021-10-01T06:17:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
# Generated by Django 3.2.7 on 2021-09-29 04:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('execution', '0008_auto_20210928_1609'),
]
operations = [
migrations.AddField(
model_name='guest',
name='special_key',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='specikey', to=settings.AUTH_USER_MODEL),
),
]
|
[
"dhruvanshu1775@gmail.com"
] |
dhruvanshu1775@gmail.com
|
af2e9492dc28f8de8f275110fb743e9b78dbf797
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/vcs/__init__.py
|
9cba76464ca1a26eb69d5c9dbf37f46eb9dc78f4
|
[
"MIT"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 17,278
|
py
|
"""Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
import sys
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.exceptions import BadCommand
from pip._internal.utils.misc import (
display_path, backup_dir, call_subprocess, rmtree, ask_path_exists,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import ( # noqa: F401
Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type
)
from pip._internal.utils.ui import SpinnerInterface # noqa: F401
AuthInfo = Tuple[Optional[str], Optional[str]]
__all__ = ['vcs']
logger = logging.getLogger(__name__)
class RemoteNotFoundError(Exception):
pass
class RevOptions(object):
"""
Encapsulates a VCS-specific revision to install, along with any VCS
install options.
Instances of this class should be treated as if immutable.
"""
def __init__(self, vcs, rev=None, extra_args=None):
# type: (VersionControl, Optional[str], Optional[List[str]]) -> None
"""
Args:
vcs: a VersionControl object.
rev: the name of the revision to install.
extra_args: a list of extra options.
"""
if extra_args is None:
extra_args = []
self.extra_args = extra_args
self.rev = rev
self.vcs = vcs
def __repr__(self):
return '<RevOptions {}: rev={!r}>'.format(self.vcs.name, self.rev)
@property
def arg_rev(self):
# type: () -> Optional[str]
if self.rev is None:
return self.vcs.default_arg_rev
return self.rev
def to_args(self):
# type: () -> List[str]
"""
Return the VCS-specific command arguments.
"""
args = [] # type: List[str]
rev = self.arg_rev
if rev is not None:
args += self.vcs.get_base_rev_args(rev)
args += self.extra_args
return args
def to_display(self):
# type: () -> str
if not self.rev:
return ''
return ' (to revision {})'.format(self.rev)
def make_new(self, rev):
# type: (str) -> RevOptions
"""
Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object.
"""
return self.vcs.make_rev_options(rev, extra_args=self.extra_args)
class VcsSupport(object):
_registry = {} # type: Dict[str, Type[VersionControl]]
schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
def __init__(self):
# type: () -> None
# Register more schemes with urlparse for various version control
# systems
urllib_parse.uses_netloc.extend(self.schemes)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
return self._registry.__iter__()
@property
def backends(self):
# type: () -> List[Type[VersionControl]]
return list(self._registry.values())
@property
def dirnames(self):
# type: () -> List[str]
return [backend.dirname for backend in self.backends]
@property
def all_schemes(self):
# type: () -> List[str]
schemes = [] # type: List[str]
for backend in self.backends:
schemes.extend(backend.schemes)
return schemes
def register(self, cls):
# type: (Type[VersionControl]) -> None
if not hasattr(cls, 'name'):
logger.warning('Cannot register VCS %s', cls.__name__)
return
if cls.name not in self._registry:
self._registry[cls.name] = cls
logger.debug('Registered VCS backend: %s', cls.name)
def unregister(self, cls=None, name=None):
# type: (Optional[Type[VersionControl]], Optional[str]) -> None
if name in self._registry:
del self._registry[name]
elif cls in self._registry.values():
del self._registry[cls.name]
else:
logger.warning('Cannot unregister because no class or name given')
def get_backend_type(self, location):
# type: (str) -> Optional[Type[VersionControl]]
"""
Return the type of the version control backend if found at given
location, e.g. vcs.get_backend_type('/path/to/vcs/checkout')
"""
for vc_type in self._registry.values():
if vc_type.controls_location(location):
logger.debug('Determine that %s uses VCS: %s',
location, vc_type.name)
return vc_type
return None
def get_backend(self, name):
# type: (str) -> Optional[Type[VersionControl]]
name = name.lower()
if name in self._registry:
return self._registry[name]
return None
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
repo_name = ''
# List of supported schemes for this Version Control
schemes = () # type: Tuple[str, ...]
# Iterable of environment variable names to pass to call_subprocess().
unset_environ = () # type: Tuple[str, ...]
default_arg_rev = None # type: Optional[str]
def __init__(self, url=None, *args, **kwargs):
self.url = url
super(VersionControl, self).__init__(*args, **kwargs)
def get_base_rev_args(self, rev):
"""
Return the base revision arguments for a vcs command.
Args:
rev: the name of a revision to install. Cannot be None.
"""
raise NotImplementedError
def make_rev_options(self, rev=None, extra_args=None):
# type: (Optional[str], Optional[List[str]]) -> RevOptions
"""
Return a RevOptions object.
Args:
rev: the name of a revision to install.
extra_args: a list of extra options.
"""
return RevOptions(self, rev, extra_args=extra_args)
@classmethod
def _is_local_repository(cls, repo):
# type: (str) -> bool
"""
posix absolute paths start with os.path.sep,
win32 ones start with drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or bool(drive)
def export(self, location):
"""
Export the repository at the url to the destination location
i.e. only download the files, without vcs informations
"""
raise NotImplementedError
def get_netloc_and_auth(self, netloc, scheme):
"""
Parse the repository URL's netloc, and return the new netloc to use
along with auth information.
Args:
netloc: the original repository URL netloc.
scheme: the repository URL's scheme without the vcs prefix.
This is mainly for the Subversion class to override, so that auth
information can be provided via the --username and --password options
instead of through the URL. For other subclasses like Git without
such an option, auth information must stay in the URL.
Returns: (netloc, (username, password)).
"""
return netloc, (None, None)
def get_url_rev_and_auth(self, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
"""
Parse the repository URL to use, and return the URL, revision,
and auth info to use.
Returns: (url, rev, (username, password)).
"""
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
if '+' not in scheme:
raise ValueError(
"Sorry, {!r} is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
)
# Remove the vcs prefix.
scheme = scheme.split('+', 1)[1]
netloc, user_pass = self.get_netloc_and_auth(netloc, scheme)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev, user_pass
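    # Illustrative example (not part of the original source):
    #   get_url_rev_and_auth('git+https://example.com/repo.git@v1.0')
    #   -> ('https://example.com/repo.git', 'v1.0', (None, None))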
def make_rev_args(self, username, password):
"""
Return the RevOptions "extra arguments" to use in obtain().
"""
return []
def get_url_rev_options(self, url):
# type: (str) -> Tuple[str, RevOptions]
"""
Return the URL and RevOptions object to use in obtain() and in
some cases export(), as a tuple (url, rev_options).
"""
url, rev, user_pass = self.get_url_rev_and_auth(url)
username, password = user_pass
extra_args = self.make_rev_args(username, password)
rev_options = self.make_rev_options(rev, extra_args=extra_args)
return url, rev_options
def normalize_url(self, url):
# type: (str) -> str
"""
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib_parse.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
# type: (str, str) -> bool
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (self.normalize_url(url1) == self.normalize_url(url2))
def fetch_new(self, dest, url, rev_options):
"""
Fetch a revision from a repository, in the case that this is the
first fetch from the repository.
Args:
dest: the directory to fetch the repository to.
rev_options: a RevOptions object.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
"""
Switch the repo at ``dest`` to point to ``URL``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def update(self, dest, url, rev_options):
"""
Update an already-existing repo to the given ``rev_options``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def is_commit_id_equal(self, dest, name):
"""
Return whether the id of the current commit equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
raise NotImplementedError
def obtain(self, dest):
# type: (str) -> None
"""
Install or update in editable mode the package represented by this
VersionControl object.
Args:
dest: the repository directory in which to install or update.
"""
url, rev_options = self.get_url_rev_options(self.url)
if not os.path.exists(dest):
self.fetch_new(dest, url, rev_options)
return
rev_display = rev_options.to_display()
if self.is_repository_directory(dest):
existing_url = self.get_remote_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.is_commit_id_equal(dest, rev_options.rev):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, url, rev_options)
else:
logger.info('Skipping because already up-to-date.')
return
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
# https://github.com/python/mypy/issues/1174
prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore
('i', 'w', 'b'))
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
if response == 'a':
sys.exit(-1)
if response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
self.fetch_new(dest, url, rev_options)
return
if response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
self.fetch_new(dest, url, rev_options)
return
# Do nothing if the response is "i".
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options)
def unpack(self, location):
# type: (str) -> None
"""
Clean up current location and download the url repository
(and vcs infos) into location
"""
if os.path.exists(location):
rmtree(location)
self.obtain(location)
@classmethod
def get_src_requirement(cls, location, project_name):
"""
Return a string representing the requirement needed to
redownload the files currently present in location, something
like:
{repository_url}@{revision}#egg={project_name}-{version_identifier}
"""
raise NotImplementedError
@classmethod
def get_remote_url(cls, location):
"""
Return the url used at location
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
raise NotImplementedError
@classmethod
def get_revision(cls, location):
"""
Return the current commit id of the files at the given location.
"""
raise NotImplementedError
@classmethod
def run_command(
cls,
cmd, # type: List[str]
show_stdout=True, # type: bool
cwd=None, # type: Optional[str]
on_returncode='raise', # type: str
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
command_desc=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
spinner=None # type: Optional[SpinnerInterface]
):
# type: (...) -> Optional[Text]
"""
Run a VCS subcommand
This is simply a wrapper around call_subprocess that adds the VCS
command name, and checks that the VCS is available
"""
cmd = [cls.name] + cmd
try:
return call_subprocess(cmd, show_stdout, cwd,
on_returncode=on_returncode,
extra_ok_returncodes=extra_ok_returncodes,
command_desc=command_desc,
extra_environ=extra_environ,
unset_environ=cls.unset_environ,
spinner=spinner)
except OSError as e:
# errno.ENOENT = no such file or directory
# In other words, the VCS executable isn't available
if e.errno == errno.ENOENT:
raise BadCommand(
'Cannot find command %r - do you have '
'%r installed and in your '
'PATH?' % (cls.name, cls.name))
else:
raise # re-raise exception if a different error occurred
@classmethod
def is_repository_directory(cls, path):
# type: (str) -> bool
"""
Return whether a directory path is a repository directory.
"""
logger.debug('Checking in %s for %s (%s)...',
path, cls.dirname, cls.name)
return os.path.exists(os.path.join(path, cls.dirname))
@classmethod
def controls_location(cls, location):
# type: (str) -> bool
"""
Check if a location is controlled by the vcs.
It is meant to be overridden to implement smarter detection
mechanisms for specific vcs.
This can do more than is_repository_directory() alone. For example,
the Git override checks that Git is actually available.
"""
return cls.is_repository_directory(location)
|
[
"354142480@qq.com"
] |
354142480@qq.com
|