| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (string, 2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
46efb78a2acbcd58008ea1bc5f50998401a4a474
|
557c9d016edc56c72dac6a0b3093d195b10b1a6c
|
/examples/ex33_common.py
|
cd535acf255a5de421d64d16d452c5e8f5303ca4
|
[
"BSD-3-Clause"
] |
permissive
|
mfem/PyMFEM
|
dae79fb8aa9ca983780c789b2b6e70e894644573
|
37084af6798ea8bb8f03b2648863befabee829bb
|
refs/heads/master
| 2023-08-31T07:04:28.522945
| 2023-07-18T02:21:57
| 2023-07-18T02:21:57
| 83,574,932
| 154
| 54
|
BSD-3-Clause
| 2023-08-15T01:40:47
| 2017-03-01T16:16:48
|
SWIG
|
UTF-8
|
Python
| false
| false
| 8,356
|
py
|
'''
Ex33_common.py
This is a translation of MFEM ex33.hpp. LAPACK calls, mfem.Vector and
mfem.DenseMatrix are replaced using numpy.
(Implementation of the AAA algorithm)
Here, we implement the triple-A algorithm [1] for the rational approximation
of complex-valued functions,
p(z)/q(z) ≈ f(z).
In this file, we always assume f(z) = z^{-α}. The triple-A algorithm
provides a robust, accurate approximation in rational barycentric form.
This representation must be transformed into a partial fraction
representation in order to be used to solve a spectral FPDE.
More specifically, we first expand the numerator in terms of the zeros of
the rational approximation,
p(z) ∝ Π_i (z - z_i),
and expand the denominator in terms of the poles of the rational
approximation,
q(z) ∝ Π_i (z - p_i).
We then use these zeros and poles to derive the partial fraction expansion
f(z) ≈ p(z)/q(z) = Σ_i c_i / (z - p_i).
[1] Nakatsukasa, Y., Sète, O., & Trefethen, L. N. (2018). The AAA algorithm
for rational approximation. SIAM Journal on Scientific Computing, 40(3),
A1494-A1522.
'''
import numpy as np
import scipy
from scipy.linalg import eig
import mfem
if mfem.mfem_mode == 'parallel':
import mfem.par as mfem
from mfem.par import intArray, doubleArray
else:
import mfem.ser as mfem
from mfem.ser import intArray, doubleArray
from sys import float_info
eps = float_info.min
def RationalApproximation_AAA(val, pt, tol, max_order):
'''
RationalApproximation_AAA: compute the rational approximation (RA) of data
val at the set of points pt
in:
val Vector of data values
pt Vector of sample points
tol Relative tolerance
max_order Maximum number of terms (order) of the RA
out:
z Support points of the RA in rational barycentric form
f Data values at support points at z
w Weights of the RA in rational barycentric form
See pg. A1501 of Nakatsukasa et al. [1].
'''
# number of sample points
size = len(val) # .Size()
assert len(pt) == size, "size mismatch"
# Initializations
J = list(range(size))
c_i = []
# mean of the value vector
mean_val = np.mean(val)
R = np.array([mean_val]*size)
z = []
f = []
w = []
for k in range(max_order):
# select next support point
        idx = np.argmax(np.abs(val-R))
# Append support points and data values
z.append(pt[idx])
f.append(val[idx])
# Update index vector
J.remove(idx)
# next column in Cauchy matrix
C_tmp = [(1.0/(pp-pt[idx]) if pp != pt[idx] else np.inf) for pp in pt]
c_i = np.hstack((c_i, C_tmp))
h_C = len(C_tmp)
w_C = k+1
        # note: the transpose is necessary due to the difference between
        # column-major and row-major ordering of the matrix
C = c_i.reshape(w_C, h_C).transpose()
Ctemp = C.copy()
Ctemp = Ctemp*(np.atleast_2d(1/val)).transpose() # InvLeftScaling
        Ctemp = Ctemp*f  # RightScaling
A = C - Ctemp
A = A*(np.atleast_2d(val)).transpose() # LeftScaling
h_Am = len(J)
w_Am = A.shape[1]
Am = np.zeros((h_Am, w_Am))
for i in range(h_Am):
ii = J[i]
for j in range(w_Am):
Am[i, j] = A[ii, j]
u, s, vh = np.linalg.svd(Am)
w = vh[k, :]
N = C.dot(w*np.array(f))
D = C.dot(w)
R = val.copy()
for i, ii in enumerate(J):
R[ii] = N[ii]/D[ii]
verr = val - R
if np.max(verr) <= tol*max(val):
break
return z, f, w
def ComputePolesAndZeros(z, f, w):
'''
ComputePolesAndZeros: compute the poles and zeros of the
    rational function f(z) = C p(z)/q(z) from its rational barycentric form.
in:
z Support points in rational barycentric form
f Data values at support points @a z
w Weights in rational barycentric form
out:
        poles Array of poles (roots of q(z))
        zeros Array of zeros (roots of p(z))
scale Scaling constant in f(z) = C p(z)/q(z)
See pg. A1501 of Nakatsukasa et al. [1].
'''
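    # For the barycentric form r(z) = (Σ_i w_i f_i/(z - z_i)) / (Σ_i w_i/(z - z_i)),
    # the poles are the finite generalized eigenvalues of the (m+1)x(m+1)
    # "arrowhead" pencil (E, B) assembled below: the weights w_i sit in the first
    # row of E, the support points z_i on its diagonal, ones in its first column,
    # and B = diag(0, 1, ..., 1). Replacing w_i by w_i*f_i in the first row gives
    # the zeros instead (see [1]).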
# Initialization
poles = []
zeros = []
# Compute the poles
m = len(w)
B = np.zeros((m+1, m+1))
E = np.zeros((m+1, m+1))
for i in range(m+1):
if i == 0:
continue
B[i, i] = 1.
E[0, i] = w[i-1]
E[i, 0] = 1.
E[i, i] = z[i-1]
# real part of eigen value
evalues = eig(E, B, left=False, right=False).real
new_poles = evalues[np.isfinite(evalues)]
poles.extend(new_poles)
B = np.zeros((m+1, m+1))
E = np.zeros((m+1, m+1))
for i in range(m+1):
if i == 0:
continue
B[i, i] = 1.
E[0, i] = w[i-1] * f[i-1]
E[i, 0] = 1.
E[i, i] = z[i-1]
# real part of eigen value
evalues = eig(E, B, left=False, right=False).real
new_zeros = evalues[np.isfinite(evalues)]
zeros.extend(new_zeros)
scale = np.dot(w, f)/np.sum(w)
return poles, zeros, scale
def PartialFractionExpansion(scale, poles, zeros):
'''
PartialFractionExpansion: compute the partial fraction expansion of the
    rational function f(z) = Σ_i c_i / (z - p_i) from its poles and zeros.
in:
poles Array of poles (same as p_i above)
zeros Array of zeros
scale Scaling constant
out:
coeffs Coefficients c_i
'''
    # Note: C p(z)/q(z) = Σ_i c_i / (z - p_i) results in a system of equations
# where the N unknowns are the coefficients c_i. After multiplying the
# system with q(z), the coefficients c_i can be computed analytically by
    # choosing N values for z. Choosing z_j = p_j diagonalizes the system and
# one can obtain an analytic form for the c_i coefficients. The result is
# implemented in the code block below.
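    # Concretely, writing C p(z)/q(z) = Σ_i c_i / (z - p_i), multiplying both
    # sides by (z - p_i) and letting z -> p_i gives
    #     c_i = C * Π_j (p_i - z_j) / Π_{k != i} (p_i - p_k),
    # which is exactly what the nested loops below accumulate.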
psize = len(poles)
zsize = len(zeros)
coeffs = [scale] * psize
for i in range(psize):
tmp_numer = 1.0
for j in range(zsize):
tmp_numer *= poles[i]-zeros[j]
tmp_denom = 1.0
for k in range(psize):
if k != i:
tmp_denom *= poles[i]-poles[k]
coeffs[i] *= tmp_numer / tmp_denom
return coeffs
def ComputePartialFractionApproximation(alpha,
lmax=1000.,
tol=1e-10,
npoints=1000,
max_order=100):
'''
ComputePartialFractionApproximation: compute a rational approximation (RA)
in partial fraction form, e.g., f(z) ≈ Σ_i c_i / (z - p_i), from sampled
values of the function f(z) = z^{-a}, 0 < a < 1.
in:
alpha Exponent a in f(z) = z^-a
lmax,npoints f(z) is uniformly sampled @a npoints times in the
interval [ 0, @a lmax ]
tol Relative tolerance
max_order Maximum number of terms (order) of the RA
out:
coeffs Coefficients c_i
poles Poles p_i
'''
assert alpha < 1., "alpha must be less than 1"
assert alpha > 0., "alpha must be greater than 0"
assert npoints > 2, "npoints must be greater than 2"
    assert lmax > 0, "lmax must be greater than 0"
assert tol > 0, "tol must be greater than 0"
dx = lmax / (npoints-1)
x = np.arange(npoints)*dx
val = x**(1-alpha)
# Apply triple-A algorithm to f(x) = x^{1-a}
z, f, w = RationalApproximation_AAA(val, # mfem.Vector(val),
x, # mfem.Vector(x),
tol, max_order)
# Compute poles and zeros for RA of f(x) = x^{1-a}
poles, zeros, scale = ComputePolesAndZeros(z, f, w)
# Remove the zero at x=0, thus, delivering a RA for f(x) = x^{-a}
zeros.remove(0.0)
# Compute partial fraction approximation of f(x) = x^{-a}
coeffs = PartialFractionExpansion(scale, poles, zeros)
return poles, coeffs
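# A minimal usage sketch (illustrative only; not part of the original MFEM
# example). It runs the AAA pipeline above on f(x) = x^{-0.3} sampled away from
# x = 0, so the 1/val scaling inside RationalApproximation_AAA stays finite, and
# prints the resulting partial fraction terms. The interval, tolerance and order
# below are arbitrary demonstration values; running the file still requires the
# mfem import at the top to succeed.
if __name__ == '__main__':
    x_demo = np.linspace(1.0, 10.0, 200)
    val_demo = x_demo**(-0.3)
    z_demo, f_demo, w_demo = RationalApproximation_AAA(val_demo, x_demo,
                                                       tol=1e-8, max_order=10)
    poles_demo, zeros_demo, scale_demo = ComputePolesAndZeros(z_demo, f_demo, w_demo)
    coeffs_demo = PartialFractionExpansion(scale_demo, poles_demo, zeros_demo)
    for c_demo, p_demo in zip(coeffs_demo, poles_demo):
        print("{:+.4e} / (z - ({:+.4e}))".format(c_demo, p_demo))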
|
[
"shiraiwa@princeton.edu"
] |
shiraiwa@princeton.edu
|
c4e3d2c0198df15fcb9635b190ff937c0a238289
|
5c35be01a7f659bb080544c5e62faa22307f01da
|
/pr412-my-charity-change-backend-python/migrations/versions/28f74cb10e35_add_send_tax_reciept_flasg_to_customer.py
|
383f316a63eb39b17dddf20b2043b814522640bd
|
[] |
no_license
|
dragonmaster-alpha/Charity-App
|
3b3932c0a05cc21b9d36bd2952673028cc56a11a
|
b66e2bc74fc15ca2a9c70f5261d05f5b9d17b451
|
refs/heads/master
| 2023-08-24T02:21:15.406784
| 2021-10-29T06:18:20
| 2021-10-29T06:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
"""Add send_tax_reciept flasg to customer
Revision ID: 28f74cb10e35
Revises: f3bdf790db9b
Create Date: 2020-08-10 14:53:05.732193
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '28f74cb10e35'
down_revision = 'f3bdf790db9b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('Customer', sa.Column('send_tax_reciept', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('Customer', 'send_tax_reciept')
# ### end Alembic commands ###
|
[
"78602151+dragonmaster-alpha@users.noreply.github.com"
] |
78602151+dragonmaster-alpha@users.noreply.github.com
|
ed80bc16dfb22d80024c3f7ec27c4aa5882763ad
|
9a8746628978eb368da0c4aea7da9ad0818b0c75
|
/StreamLitLibrary.py
|
1f14d7959d1c59d1779fd1855080ec70de2cf10a
|
[] |
no_license
|
jayreds73/Heroku-Deployment
|
67999469165f4f9bee91252aef34a54de518b51b
|
bfa893a19004c1418e8a0ac72d1e522b03ae790f
|
refs/heads/master
| 2022-12-15T03:17:28.359818
| 2020-09-09T20:10:53
| 2020-09-09T20:10:53
| 294,200,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
#from flask import Flask,request
import pandas as pd
import pickle as pkl
import streamlit as st
# Run this - "streamlit run filename" in command prompt
# load the model at the start of the app
pickle_in = open('model.pkl','rb')
model_iris = pkl.load(pickle_in)
def get_description(int_code):
if (int_code==0):
desc = 'Setosa'
elif (int_code == 1):
desc = 'Versicolour'
else:
desc = 'Virginica'
return desc
def Welcome():
return "Hello world, Jayanth"
def iris_predict(sl,sw,pl,pw):
prediction = model_iris.predict([[sl,sw,pl,pw]])
return "The prediction is: " + get_description(int(prediction[0]))
def main():
#Gives Title
st.title("Iris Data Set Prediction")
# Creates look and feel -- see more for html
html_temp = """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">Streamlit Rendered App for IRIS prediction </h2>
</div>
"""
# Executes HTML
st.markdown(html_temp, unsafe_allow_html=True)
sl = float(st.text_input('Sepal Length','1.25'))
sw = float(st.text_input('Sepal Width','2.25'))
pl = float(st.text_input('Petal Length','3.25'))
pw = float(st.text_input('Petal Width','4.8'))
prediction = ""
# create button
if st.button("Predict"):
prediction = iris_predict(sl,sw,pl,pw)
st.success(prediction)
# prediction_t = ""
# if st.button("Test"):
# prediction_t = 'Pass'
# st.success(prediction_t)
# if st.button("About"):
# st.text("Lets LEarn")
# st.text("Built with Streamlit")
if(__name__=='__main__'):
main()
|
[
"noreply@github.com"
] |
jayreds73.noreply@github.com
|
fa831199505226547d9cfa53b8caf0ccbd1afd58
|
fa7e75212e9f536eed7a78237a5fa9a4021a206b
|
/OLD_ROOT/Backend/SMQTK_Backend/utils/jsmin/test.py
|
7aba6993dc941efa2e6ea9557fd99d5a9b43b720
|
[] |
no_license
|
kod3r/SMQTK
|
3d40730c956220a3d9bb02aef65edc8493bbf527
|
c128e8ca38c679ee37901551f4cc021cc43d00e6
|
refs/heads/master
| 2020-12-03T09:12:41.163643
| 2015-10-19T14:56:55
| 2015-10-19T14:56:55
| 44,916,678
| 1
| 0
| null | 2015-10-25T15:47:35
| 2015-10-25T15:47:35
| null |
UTF-8
|
Python
| false
| false
| 8,702
|
py
|
import unittest
import sys
# modified path since this is now being embedded in another project.
from SMQTK_Backend.utils import jsmin
class JsTests(unittest.TestCase):
def _minify(self, js):
return jsmin.jsmin(js)
def assertEqual(self, thing1, thing2):
if thing1 != thing2:
print(repr(thing1), repr(thing2))
raise AssertionError
return True
def assertMinified(self, js_input, expected):
minified = jsmin.jsmin(js_input)
assert minified == expected, "%r != %r" % (minified, expected)
def testQuoted(self):
js = r'''
Object.extend(String, {
interpret: function(value) {
return value == null ? '' : String(value);
},
specialChar: {
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'\\': '\\\\'
}
});
'''
expected = r"""Object.extend(String,{interpret:function(value){return value==null?'':String(value);},specialChar:{'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','\\':'\\\\'}});"""
self.assertMinified(js, expected)
def testSingleComment(self):
js = r'''// use native browser JS 1.6 implementation if available
if (Object.isFunction(Array.prototype.forEach))
Array.prototype._each = Array.prototype.forEach;
if (!Array.prototype.indexOf) Array.prototype.indexOf = function(item, i) {
// hey there
function() {// testing comment
foo;
//something something
location = 'http://foo.com;'; // goodbye
}
//bye
'''
expected = r"""
if(Object.isFunction(Array.prototype.forEach))
Array.prototype._each=Array.prototype.forEach;if(!Array.prototype.indexOf)Array.prototype.indexOf=function(item,i){ function(){ foo; location='http://foo.com;';}"""
# print expected
self.assertMinified(js, expected)
def testEmpty(self):
self.assertMinified('', '')
self.assertMinified(' ', '')
self.assertMinified('\n', '')
self.assertMinified('\r\n', '')
self.assertMinified('\t', '')
def testMultiComment(self):
js = r"""
function foo() {
print('hey');
}
/*
if(this.options.zindex) {
this.originalZ = parseInt(Element.getStyle(this.element,'z-index') || 0);
this.element.style.zIndex = this.options.zindex;
}
*/
another thing;
"""
expected = r"""function foo(){print('hey');}
another thing;"""
self.assertMinified(js, expected)
def testLeadingComment(self):
js = r"""/* here is a comment at the top
it ends here */
function foo() {
alert('crud');
}
"""
expected = r"""function foo(){alert('crud');}"""
self.assertMinified(js, expected)
def testJustAComment(self):
self.assertMinified(' // a comment', '')
def testRe(self):
js = r'''
var str = this.replace(/\\./g, '@').replace(/"[^"\\\n\r]*"/g, '');
return (/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);
});'''
expected = r"""var str=this.replace(/\\./g,'@').replace(/"[^"\\\n\r]*"/g,'');return(/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);});"""
self.assertMinified(js, expected)
def testIgnoreComment(self):
js = r"""
var options_for_droppable = {
overlap: options.overlap,
containment: options.containment,
tree: options.tree,
hoverclass: options.hoverclass,
onHover: Sortable.onHover
}
var options_for_tree = {
onHover: Sortable.onEmptyHover,
overlap: options.overlap,
containment: options.containment,
hoverclass: options.hoverclass
}
// fix for gecko engine
Element.cleanWhitespace(element);
"""
expected = r"""var options_for_droppable={overlap:options.overlap,containment:options.containment,tree:options.tree,hoverclass:options.hoverclass,onHover:Sortable.onHover}
var options_for_tree={onHover:Sortable.onEmptyHover,overlap:options.overlap,containment:options.containment,hoverclass:options.hoverclass}
Element.cleanWhitespace(element);"""
self.assertMinified(js, expected)
def testHairyRe(self):
js = r"""
inspect: function(useDoubleQuotes) {
var escapedString = this.gsub(/[\x00-\x1f\\]/, function(match) {
var character = String.specialChar[match[0]];
return character ? character : '\\u00' + match[0].charCodeAt().toPaddedString(2, 16);
});
if (useDoubleQuotes) return '"' + escapedString.replace(/"/g, '\\"') + '"';
return "'" + escapedString.replace(/'/g, '\\\'') + "'";
},
toJSON: function() {
return this.inspect(true);
},
unfilterJSON: function(filter) {
return this.sub(filter || Prototype.JSONFilter, '#{1}');
},
"""
expected = r"""inspect:function(useDoubleQuotes){var escapedString=this.gsub(/[\x00-\x1f\\]/,function(match){var character=String.specialChar[match[0]];return character?character:'\\u00'+match[0].charCodeAt().toPaddedString(2,16);});if(useDoubleQuotes)return'"'+escapedString.replace(/"/g,'\\"')+'"';return"'"+escapedString.replace(/'/g,'\\\'')+"'";},toJSON:function(){return this.inspect(true);},unfilterJSON:function(filter){return this.sub(filter||Prototype.JSONFilter,'#{1}');},"""
self.assertMinified(js, expected)
def testNoBracesWithComment(self):
js = r"""
onSuccess: function(transport) {
var js = transport.responseText.strip();
if (!/^\[.*\]$/.test(js)) // TODO: improve sanity check
throw 'Server returned an invalid collection representation.';
this._collection = eval(js);
this.checkForExternalText();
}.bind(this),
onFailure: this.onFailure
});
"""
expected = r"""onSuccess:function(transport){var js=transport.responseText.strip();if(!/^\[.*\]$/.test(js))
throw'Server returned an invalid collection representation.';this._collection=eval(js);this.checkForExternalText();}.bind(this),onFailure:this.onFailure});"""
self.assertMinified(js, expected)
def testSpaceInRe(self):
js = r"""
num = num.replace(/ /g,'');
"""
self.assertMinified(js, "num=num.replace(/ /g,'');")
def testEmptyString(self):
js = r'''
function foo('') {
}
'''
self.assertMinified(js, "function foo(''){}")
def testDoubleSpace(self):
js = r'''
var foo = "hey";
'''
self.assertMinified(js, 'var foo="hey";')
def testLeadingRegex(self):
js = r'/[d]+/g '
self.assertMinified(js, js.strip())
def testLeadingString(self):
js = r"'a string in the middle of nowhere'; // and a comment"
self.assertMinified(js, "'a string in the middle of nowhere';")
def testSingleCommentEnd(self):
js = r'// a comment\n'
self.assertMinified(js, '')
def testInputStream(self):
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
ins = StringIO(r'''
function foo('') {
}
''')
outs = StringIO()
m = jsmin.JavascriptMinify()
m.minify(ins, outs)
output = outs.getvalue()
assert output == "function foo(''){}"
def testUnicode(self):
instr = u'\u4000 //foo'
expected = u'\u4000'
output = jsmin.jsmin(instr)
self.assertEqual(output, expected)
def testCommentBeforeEOF(self):
self.assertMinified("//test\r\n", "")
def testCommentInObj(self):
self.assertMinified("""{
a: 1,//comment
}""", "{a:1,}")
def testCommentInObj2(self):
self.assertMinified("{a: 1//comment\r\n}", "{a:1\n}")
def testImplicitSemicolon(self):
# return \n 1 is equivalent with return; 1
# so best make sure jsmin retains the newline
self.assertMinified("return;//comment\r\na", "return;a")
def testImplicitSemicolon2(self):
self.assertMinified("return//comment...\r\na", "return\na")
def testSingleComment2(self):
self.assertMinified('x.replace(/\//, "_")// slash to underscore',
'x.replace(/\//,"_")')
if __name__ == '__main__':
unittest.main()
|
[
"paul.tunison@kitware.com"
] |
paul.tunison@kitware.com
|
4b06fc12b6a9688a025351ad8cbe58b26b21f7c5
|
754b824173f3e5b1cef69890deaea71c2ad0b84d
|
/Linear Regression.py
|
3d28b9e8ff31b546708213c7417bde49d549cbcf
|
[] |
no_license
|
yanivmm/python
|
5a8e5c8a59c27a280a92cc7b8617e73b9d1c9a6e
|
e27f4d0716525e9c4ee6c8541cdb048670f5db22
|
refs/heads/master
| 2023-02-03T11:49:35.837406
| 2020-12-24T10:53:10
| 2020-12-24T10:53:10
| 257,415,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
#import
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
# read file
path = r'C:\Users\97250\Desktop\studied\R ,python\ניתוח מידע\Ecommerce Customers.csv'
cust = pd.read_csv(path)
# explore data
# Try to find which column most affects the Yearly Amount Spent (and other relationships)
sns.jointplot(x='Time on Website',y='Yearly Amount Spent',data=cust)
sns.jointplot(x='Time on App',y='Yearly Amount Spent',data=cust)
sns.lmplot(x='Length of Membership',y='Yearly Amount Spent',data = cust)
#pairplot
sns.pairplot(cust)
### Training and Testing Data
from sklearn.model_selection import train_test_split
X=cust[['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']]
y=cust['Yearly Amount Spent']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
# model
from sklearn.linear_model import LinearRegression
lm =LinearRegression().fit(X_train,y_train)
lm.coef_
prediction = lm.predict(X_test)
# visual plot of the differences between y_test and prediction
sns.scatterplot(x = y_test,y = prediction, hue =(abs(prediction-y_test)))
# numerical evaluation
MAE = np.mean(abs(prediction-y_test))
MSE = np.mean((prediction-y_test)**2)
RMSE= np.sqrt(np.mean((prediction-y_test)**2))
print('\n')
print('MAE: '+str(MAE),'MSE: '+str(MSE),'RMSE: '+str(RMSE),sep = '\n')
# plot of the residuals of the y_test and prediction
residuals = (y_test-prediction)
plt.figure(figsize=(12,8))
sns.distplot(residuals,bins = 60,color='red')
# the residuals are normally distributed, therefore the model is fine!
# create a dataframe of the coefficients and their values
coefficient = lm.coef_
col = ['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']
coefficient_data = pd.DataFrame(coefficient,col,columns = ['coefficient'])
coefficient_data = coefficient_data.sort_values('coefficient',ascending=False)
# visualize the coefficients
coefficient_data.plot(kind ='bar',figsize=(12,8),color='gold',fontsize = 18)
plt.title('\n Coefficients and its values\n',fontsize=34)
# only two most affecting coefficients
print('\n')
for i in range(2):
print(coefficient_data.index[i])
|
[
"noreply@github.com"
] |
yanivmm.noreply@github.com
|
207ca51d306160bcb1b64211690cf57342453446
|
2f43dd9eae7c3a290a50599305fac5106b2dd7cf
|
/webempresa/services/models.py
|
0277ee4a4986340cfebae70d45014dd4b5affc40
|
[] |
no_license
|
FernandoHer/maderamandala
|
2f4a1713ea4e067198f74ca00ae7197a606f3524
|
eec89b421337b36840ec5fe4ff65d773bba0d870
|
refs/heads/master
| 2022-11-25T16:37:08.917485
| 2020-04-07T20:54:15
| 2020-04-07T20:54:15
| 253,895,809
| 0
| 0
| null | 2022-11-22T02:25:07
| 2020-04-07T19:45:24
|
Python
|
UTF-8
|
Python
| false
| false
| 723
|
py
|
from django.db import models
# Create your models here.
class Service(models.Model):
title = models.CharField(max_length=200, verbose_name = "Titulo")
subtitle = models.CharField(max_length=200, verbose_name = "Subtitulo")
content = models.TextField(verbose_name = "Contenido")
image = models.ImageField(verbose_name = "Imagen", upload_to="services")
created = models.DateTimeField(auto_now_add=True, verbose_name = "Fecha de Creacion")
updated = models.DateTimeField(auto_now=True, verbose_name = "Fecha de actualizacion")
class Meta:
verbose_name = "servicio"
verbose_name_plural = "servicios"
ordering = ["-created"]
def __str__(self):
return self.title
|
[
"juanherdoiza@iMac-de-Juan.local"
] |
juanherdoiza@iMac-de-Juan.local
|
eca0f1c99ec492e8b3c6a27b02d6557f8aa3ae1b
|
84c2fa4aed9094b5ec3cc612d28980afe5d42d34
|
/leetcode/day11_24.py
|
a6e997a5b64e66b53c1eb8fab9ec554c45fcd371
|
[] |
no_license
|
cyg2695249540/generatewework
|
186831a1b5c788e9b99e90d1a08bf6a8638131ce
|
cd01b0fc4a69cc2f2ed4c109afdf8771bee3bffd
|
refs/heads/master
| 2023-01-20T17:13:13.186034
| 2020-12-01T12:05:01
| 2020-12-01T12:05:01
| 310,201,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @FILE : day11_24.py
# @Author : Pluto.
# @Time : 2020/11/24 16:06
"""
exp: 66. Plus One
Given a non-empty array of digits representing a non-negative integer, add one to the integer.
The most significant digit is stored at the head of the array, and each element holds a single digit.
You may assume the integer does not contain any leading zeros, except for the number 0 itself.
Example 1:
Input: digits = [1,2,3]
Output: [1,2,4]
Explanation: the input array represents the number 123.
Example 2:
Input: digits = [4,3,2,1]
Output: [4,3,2,2]
Explanation: the input array represents the number 4321.
Example 3:
Input: digits = [0]
Output: [1]
Constraints:
1 <= digits.length <= 100
0 <= digits[i] <= 9
"""
def plusOne(digits):
s="".join(str(x) for x in digits)
ss=str(int(s)+1)
r=[int(x) for x in ss]
return [0]*(len(digits)-len(r))+r
if __name__ == '__main__':
digits = [0, 0, 0]
    print(plusOne(digits))
|
[
"2695249540@qq.com"
] |
2695249540@qq.com
|
6d233bd2ae30ac3ff55e44e216f83f7ca5974969
|
887d21782f2741d8a273807642346ab7cd0dac6e
|
/list_files.py
|
4cbc2e8e2b36f40a7c36ec26d8d15585aba26e85
|
[] |
no_license
|
TheRealTimCameron/Sandbox
|
c375ff356710fe4a1935ddd86603731240c7283e
|
4778376c0a018065b50f6ba4abcd6cfac344d538
|
refs/heads/master
| 2020-04-30T13:37:00.562336
| 2019-03-21T03:46:29
| 2019-03-21T03:46:29
| 176,863,481
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
import os
print("The files and folders in {} are:".format(os.getcwd()))
items = os.listdir('.')
for item in items:
print(item)
|
[
"TimCameron56@gmail.com"
] |
TimCameron56@gmail.com
|
f75a10dc0ef05561c5371a193810ff7eefcf5c22
|
b76aa6044aa0971bc7842cd4c3faa281c9c0e5cd
|
/1044_multiplos.py
|
f0455f370926483d5f3d396afe4542b00c05b844
|
[] |
no_license
|
Miguelsantos101/algoritmos1-2021-1
|
8496233f6d37bd70e47949c7e23b34e6c2181bd1
|
fe03097d870e4f47796e69c97020f9c0bdba0cab
|
refs/heads/main
| 2023-05-03T23:08:53.669522
| 2021-05-27T02:33:12
| 2021-05-27T02:33:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
#a, b = input().split()
#a = int(a)
#b = int(b)
a, b = map(int, input().split())
if a < b:
temp = b
b = a
a = temp
if a % b == 0:
print("Sao Multiplos")
else:
print("Nao sao Multiplos")
|
[
"carloshiga@alumni.usp.br"
] |
carloshiga@alumni.usp.br
|
c596b6a116427c9d0e40510a7bac545c5ed464a6
|
f98f6746851790aabeb996fafe74a24236bb580d
|
/is_prime_number.py
|
373c717dc73f6683a6918d54130d6e0b43452f31
|
[] |
no_license
|
licheeee/PythonProject
|
b8c619cfbbe2f0e70284ffc2c0e9283c41d6f58c
|
9c114f32b51e6f8dc275cb36cb8b0e05e1c42548
|
refs/heads/master
| 2020-04-23T06:12:04.958043
| 2019-10-17T15:15:58
| 2019-10-17T15:15:58
| 170,965,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# -*- coding: UTF-8 -*-
# Determine whether a number is a prime number
num = int(input("Please input a number :"))
primeFlag = True
sqrt = int(num ** 0.5)
for i in range(2, sqrt + 1):
if (num % i) == 0:
print("{0} is not a prime number.".format(num))
break
else:
print("{0} is a prime number.".format(num))
|
[
"qiaoxw@outlook.com"
] |
qiaoxw@outlook.com
|
edbfd9f211f972906a7be68a3b1de4ba080d1d03
|
4e2a22470c983bc6f8463b4d0bd2563e2b4fadba
|
/manage.py
|
91afffd0eea54135379279692eb3ab4988697b8b
|
[] |
no_license
|
payush/ayush-crowdbotics-375
|
8537f9a86fcdcda7418a0c10a5f258bafc07dd9c
|
c11bdd721d91e765bcb04379dac476279e6ca599
|
refs/heads/master
| 2020-03-23T22:34:09.700234
| 2018-07-24T16:09:19
| 2018-07-24T16:09:19
| 142,182,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ayush_crowdbotics_375.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"ayushpuroheet@gmail.com"
] |
ayushpuroheet@gmail.com
|
c6119fca8e49b7cc3081a8d3441946e564c44017
|
24a3645595fb5aa6f4e0484c8b9e6fbcf31ae5a5
|
/rl_loop/train_and_validate.py
|
3be4f997d7bb5dc358cc305377386e12b7305276
|
[
"Apache-2.0"
] |
permissive
|
2series/minigo
|
dcec298021e714fb8e203b847dd2d7a9d9451823
|
fda1487dff94a710e9359f80c28d08d99d6c3e3c
|
refs/heads/master
| 2020-04-05T20:35:18.809871
| 2018-11-12T09:18:53
| 2018-11-12T09:18:53
| 157,187,163
| 1
| 0
| null | 2018-11-12T09:20:43
| 2018-11-12T09:20:42
| null |
UTF-8
|
Python
| false
| false
| 3,698
|
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Run train and validate in a loop, as subprocesses.
We run as subprocesses because it gives us some isolation.
"""
import itertools
import os
import sys
import time
sys.path.insert(0, '.')
from absl import app, flags
from tensorflow import gfile
from rl_loop import fsdb
import mask_flags
from rl_loop import shipname
import utils
flags.DEFINE_string('pro_dataset', None,
'Location of preprocessed pro dataset for validation')
# From fsdb.py - must pass one of the two.
flags.declare_key_flag('base_dir')
flags.declare_key_flag('bucket_name')
FLAGS = flags.FLAGS
try:
TPU_NAME = os.environ['TPU_NAME']
except KeyError:
raise Exception("Must have $TPU_NAME configured")
def train():
model_num, model_name = fsdb.get_latest_model()
print("Training on gathered game data, initializing from {}".format(
model_name))
new_model_num = model_num + 1
new_model_name = shipname.generate(new_model_num)
print("New model will be {}".format(new_model_name))
training_file = os.path.join(
fsdb.golden_chunk_dir(), str(new_model_num) + '.tfrecord.zz')
while not gfile.Exists(training_file):
print("Waiting for", training_file)
time.sleep(1 * 60)
save_file = os.path.join(fsdb.models_dir(), new_model_name)
cmd = ['python3', 'train.py', training_file,
'--use_tpu',
'--tpu_name={}'.format(TPU_NAME),
'--flagfile=rl_loop/distributed_flags',
'--export_path={}'.format(save_file)]
return mask_flags.run(cmd)
def validate_holdout_selfplay():
"""Validate on held-out selfplay data."""
holdout_dirs = (os.path.join(fsdb.holdout_dir(), d)
for d in reversed(gfile.ListDirectory(fsdb.holdout_dir()))
if gfile.IsDirectory(os.path.join(fsdb.holdout_dir(), d))
for f in gfile.ListDirectory(os.path.join(fsdb.holdout_dir(), d)))
# This is a roundabout way of computing how many hourly directories we need
# to read in order to encompass 20,000 holdout games.
    holdout_dirs = set(itertools.islice(holdout_dirs, 20000))
cmd = ['python3', 'validate.py'] + list(holdout_dirs) + [
'--use_tpu',
'--tpu_name={}'.format(TPU_NAME),
'--flagfile=rl_loop/distributed_flags',
'--expand_validation_dirs']
mask_flags.run(cmd)
def validate_pro():
"""Validate on professional data."""
cmd = ['python3', 'validate.py', FLAGS.pro_dataset,
'--use_tpu',
'--tpu_name={}'.format(TPU_NAME),
'--flagfile=rl_loop/distributed_flags',
'--validate_name=pro']
mask_flags.run(cmd)
def loop(unused_argv):
while True:
print("=" * 40)
with utils.timer("Train"):
completed_process = train()
if completed_process.returncode > 0:
print("Training failed! Skipping validation...")
continue
with utils.timer("Validate"):
validate_pro()
validate_holdout_selfplay()
if __name__ == '__main__':
flags.mark_flag_as_required('pro_dataset')
app.run(loop)
|
[
"brian.kihoon.lee@gmail.com"
] |
brian.kihoon.lee@gmail.com
|
f03e0bd25b2839aff153fea90abc924e46a6584e
|
1bdc56d1f66501bada19b277a47655dc99f44f2e
|
/const.py
|
c6f26b24ff8369d500a57235ad9d6e874677e6b2
|
[] |
no_license
|
antista/pacman
|
682811715b930db0c8765d5da9340f91d8f4e8b7
|
adb51eb219c6758dc553671ddc68db700a6df358
|
refs/heads/master
| 2020-04-17T07:56:49.953567
| 2019-01-18T11:02:31
| 2019-01-18T11:02:31
| 166,391,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,379
|
py
|
import pyganim
from pygame import *
from pyganim import *
SOUNDS = dict(
wakka='sounds/wakka.wav',
energizer='sounds/energizer.wav',
eat_ghost='sounds/eating_ghost.wav',
death='sounds/death.wav'
)
SIZE = 16
BACK_COLOR = "#00FFFF"
ANIMATION_DELAY = 50  # frame switching speed
ANIMATION = dict()
ANIMATION['right'] = [('images/moving/m1.ico'),
('images/moving/m2.ico'),
('images/moving/m3.ico'),
('images/moving/m4.ico'),
('images/moving/m5.ico'),
('images/moving/m6.ico'),
('images/moving/m6.ico'),
('images/moving/m5.ico'),
('images/moving/m4.ico'),
('images/moving/m3.ico'),
('images/moving/m2.ico'),
('images/moving/m1.ico')]
ANIMATION['left'] = [pygame.transform.rotate(image.load('images/moving/m1.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m2.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m3.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m4.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m5.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m6.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m6.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m5.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m4.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m3.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m2.ico'), 180),
pygame.transform.rotate(image.load('images/moving/m1.ico'), 180)]
ANIMATION['up'] = [pygame.transform.rotate(image.load('images/moving/m1.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m2.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m3.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m4.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m5.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m6.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m6.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m5.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m4.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m3.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m2.ico'), 90),
pygame.transform.rotate(image.load('images/moving/m1.ico'), 90)]
ANIMATION['down'] = [pygame.transform.rotate(image.load('images/moving/m1.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m2.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m3.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m4.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m5.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m6.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m6.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m5.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m4.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m3.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m2.ico'), -90),
pygame.transform.rotate(image.load('images/moving/m1.ico'), -90)]
ANIMATION_STAY = dict()
ANIMATION_STAY['left'] = [(pygame.transform.rotate(image.load('images/moving/m6.ico'), 180), 1)]
ANIMATION_STAY['right'] = [('images/moving/m6.ico', 1)]
ANIMATION_STAY['up'] = [(pygame.transform.rotate(image.load('images/moving/m6.ico'), 90), 1)]
ANIMATION_STAY['down'] = [(pygame.transform.rotate(image.load('images/moving/m6.ico'), -90), 1)]
|
[
"anti2100@yandex.ru"
] |
anti2100@yandex.ru
|
1cb69e60aa615509cf524ab1fb086168647ae432
|
7dc80048f72e106f977b49ea882c63cc9623e3ef
|
/notebooks/other/Y2017M07D28_RH_python27setup_v01.py
|
250e214bbfc2fd21afe44797cb7e69bbeb700a16
|
[] |
no_license
|
YanCheng-go/Aqueduct30Docker
|
8400fdea23bfd788f9c6de71901e6f61530bde38
|
6606fa03d145338d48101fc53ab4a5fccf3ebab2
|
refs/heads/master
| 2022-12-16T03:36:25.704103
| 2020-09-09T14:38:28
| 2020-09-09T14:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
# coding: utf-8
# # Test Python 27 setup
#
# * Purpose of script: test the python 27 environment against several libraries
# * Author: Rutger Hofste
# * Kernel used: python27
# * Date created: 20170728
#
#
# In[3]:
packages = {"earth engine":-1,"gdal":-1,"geopandas":-1,"arcgis":-1}
# In[6]:
try:
import ee
packages["earth engine"]=1
except:
packages["earth engine"]=0
# In[4]:
try:
from osgeo import gdal
packages["gdal"]=1
except:
packages["gdal"]=0
# In[10]:
try:
import geopandas
packages["geopandas"]=1
except:
packages["geopandas"]=0
# In[11]:
try:
import arcgis.gis
packages["arcgis"]=1
except:
packages["arcgis"]=0
# In[12]:
print(packages)
# In[ ]:
|
[
"rutgerhofste@gmail.com"
] |
rutgerhofste@gmail.com
|
a083a001d9f5a9559169c82b7ac70022a8d131c7
|
c534fba89ff0462334cc724ff4010cbed829e294
|
/web/myadmin/migrations/0012_auto_20191019_1638.py
|
8bbaa46d7e85d15be38f10c54609829eb800d7f6
|
[] |
no_license
|
victorfengming/python_bookshop
|
974f5f8ff3b53b024b573f0f256409204116e114
|
c0a4757fc2031a015d4b198ba889be69a2a4a3c5
|
refs/heads/master
| 2020-09-02T18:02:07.547345
| 2019-11-04T15:10:44
| 2019-11-04T15:10:44
| 219,275,403
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
# Generated by Django 2.2.3 on 2019-10-19 16:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myadmin', '0011_auto_20191018_2225'),
]
operations = [
migrations.DeleteModel(
name='Booktype',
),
migrations.DeleteModel(
name='Users',
),
]
|
[
"fengming19981221@163.com"
] |
fengming19981221@163.com
|
5fa8134299debad3891dee51566474f0fd8a89e0
|
e8411c4506c106ce0a378f8a1a86c7b83363867c
|
/src/kmeans.py
|
b4a8131cc89b91160836751d37a336cc6d1d59a9
|
[] |
no_license
|
rdyzakya/UnsupervisedLearning
|
d833d49feed7ebe41ef8fa855704ec56ed830de2
|
0a0e6a9f9d0b9cc03816384307556d217f3ac70e
|
refs/heads/main
| 2023-06-26T01:19:09.690564
| 2021-07-25T11:08:13
| 2021-07-25T11:08:13
| 382,757,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
import numpy as np
import time
class DistanceMethodNotValidError(Exception):
pass
class NotSameLength(Exception):
pass
class KMeansClustering:
def __init__(self):
self.centroids = None
self.x_columns = None
self.how = None
self.df = None
def euclidean_distance(self,this_row,other):
res = 0
for cols in self.x_columns:
delta = this_row[cols] - other[cols]
delta_sqr = delta**2
res += delta_sqr
return np.sqrt(res)
def manhattan_distance(self,this_row,other):
res = 0
for cols in self.x_columns:
delta = this_row[cols] - other[cols]
delta_abs = np.abs(delta)
res += delta_abs
return res
def calculate_nearest(self,row,how='euclidean'):
        # use a float array so distances are not truncated to integers
        dist = np.zeros(len(self.centroids))
for i in range(len(self.centroids)):
if how == 'euclidean':
dist[i] = self.euclidean_distance(row,self.centroids.loc[i])
elif how == 'manhattan':
dist[i] = self.manhattan_distance(row,self.centroids.loc[i])
else:
raise DistanceMethodNotValidError()
min_idx = np.where(dist == dist.min())[0][0]
return min_idx
def fit(self,df_,x_columns,k,how='euclidean'):
df = df_.copy()
self.x_columns = [df.columns[i] for i in x_columns]
self.centroids = df.sample(k).copy()
self.centroids = self.centroids.reset_index()
self.centroids = self.centroids[self.x_columns]
self.how = how
df['Label'] = np.nan
df['New Label'] = np.nan
while False in (df['Label'] == df['New Label']).unique():
df['Label'] = df.apply(lambda row: self.calculate_nearest(row[self.x_columns],self.how),axis=1)
for i in range(len(self.centroids)):
df_i = df[df['Label'] == i]
means = df_i.mean()
for col in self.x_columns:
self.centroids.loc[i,col] = means[col]
df['New Label'] = df.apply(lambda row: self.calculate_nearest(row[self.x_columns],self.how),axis=1)
df['Label'] = df['New Label']
del df['New Label']
self.df = df
def predict(self,data):
if len(self.x_columns) != len(data):
raise NotSameLength()
temp = data
data = {}
for i in range(len(self.x_columns)):
data[self.x_columns[i]] = temp[i]
return self.calculate_nearest(data,self.how)
|
[
"impper1@gmail.com"
] |
impper1@gmail.com
|
ff90cd1f1161c0d09ab2942b7f313e655ef548a0
|
a6bd898302ffebe9066595b264f9e5e38e6fa8e6
|
/settings_template.py
|
069b2d192200ef4343a3508486203a989c2cb909
|
[] |
no_license
|
symroe/teamprime_retweets
|
65e8ec57095b138be45496eb115fb4da1d1e1af0
|
08e817da6191a8058b3606b076ba9de6bd253b12
|
refs/heads/master
| 2021-01-10T22:04:16.968867
| 2013-09-20T13:32:03
| 2013-09-20T13:32:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN_KEY = ""
ACCESS_TOKEN_SECRET = ""
username = "TeamPrimeLtd"
TWEET_PATH = "tweets"
|
[
"sym.roe@talusdesign.co.uk"
] |
sym.roe@talusdesign.co.uk
|
7f19e8afa6fdab3a0d7af9f55578ca1ba59afa65
|
81061f903318fceac254b60cd955c41769855857
|
/server/paiements/migrations/0003_auto__chg_field_transaction_extra_data.py
|
b059e9dea63be589ea180dbfe9a60bdc411cea7a
|
[
"BSD-2-Clause"
] |
permissive
|
agepoly/polybanking
|
1e253e9f98ba152d9c841e7a72b7ee7cb9d9ce89
|
f8f19399585293ed41abdab53609ecb8899542a2
|
refs/heads/master
| 2020-04-24T06:15:16.606580
| 2015-10-26T19:52:03
| 2015-10-26T19:52:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,133
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Transaction.extra_data'
db.alter_column(u'paiements_transaction', 'extra_data', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'Transaction.extra_data'
db.alter_column(u'paiements_transaction', 'extra_data', self.gf('django.db.models.fields.TextField')(default=''))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'configs.config': {
'Meta': {'object_name': 'Config'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admin_enable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allowed_users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_api': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_ipn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_request': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'test_mode': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'url_back_err': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_back_ok': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_ipn': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'paiements.transaction': {
'Meta': {'object_name': 'Transaction'},
'amount': ('django.db.models.fields.IntegerField', [], {}),
'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configs.Config']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_status': ('django.db.models.fields.CharField', [], {'default': "'cr'", 'max_length': '2'}),
'ipn_needed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_postfinance_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_user_back_from_postfinance_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_userforwarded_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'postfinance_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postfinance_status': ('django.db.models.fields.CharField', [], {'default': "'??'", 'max_length': '2'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'paiements.transctionlog': {
'Meta': {'object_name': 'TransctionLog'},
'extra_data': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['paiements.Transaction']"}),
'when': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['paiements']
|
[
"maximilien@theglu.org"
] |
maximilien@theglu.org
|
c7fe334fcb246d191e7db56465b77abd86f98947
|
83412c7effe6a47e423fb55541d768d1bb308de0
|
/HW1/code/src/titanic.py
|
90f303852d55635b43e5ce3301f6e9b3daf995ab
|
[] |
no_license
|
atibhav21/CSM146
|
7be041ae972ebd3a78c01e2a98075f66f875e9f4
|
ab806ec5fe23a7b36e503b304445b0efe83f12d5
|
refs/heads/master
| 2021-09-09T05:52:27.155543
| 2018-03-14T02:52:46
| 2018-03-14T02:52:46
| 117,149,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,174
|
py
|
"""
Author : Yi-Chieh Wu, Sriram Sankararaman
Description : Titanic
"""
# Use only the provided packages!
import math
import csv
from util import *
from collections import Counter
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import operator
######################################################################
# classes
######################################################################
class Classifier(object) :
"""
Classifier interface.
"""
def fit(self, X, y):
raise NotImplementedError()
def predict(self, X):
raise NotImplementedError()
class MajorityVoteClassifier(Classifier) :
def __init__(self) :
"""
A classifier that always predicts the majority class.
Attributes
--------------------
prediction_ -- majority class
"""
self.prediction_ = None
def fit(self, X, y) :
"""
Build a majority vote classifier from the training set (X, y).
Parameters
--------------------
X -- numpy array of shape (n,d), samples
y -- numpy array of shape (n,), target classes
Returns
--------------------
self -- an instance of self
"""
majority_val = Counter(y).most_common(1)[0][0]
self.prediction_ = majority_val
return self
def predict(self, X) :
"""
Predict class values.
Parameters
--------------------
X -- numpy array of shape (n,d), samples
Returns
--------------------
y -- numpy array of shape (n,), predicted classes
"""
if self.prediction_ is None :
raise Exception("Classifier not initialized. Perform a fit first.")
n,d = X.shape
y = [self.prediction_] * n
return y
class RandomClassifier(Classifier) :
def __init__(self) :
"""
A classifier that predicts according to the distribution of the classes.
Attributes
--------------------
probabilities_ -- class distribution dict (key = class, val = probability of class)
"""
self.probabilities_ = None
def fit(self, X, y) :
"""
Build a random classifier from the training set (X, y).
Parameters
--------------------
X -- numpy array of shape (n,d), samples
y -- numpy array of shape (n,), target classes
Returns
--------------------
self -- an instance of self
"""
### ========== TODO : START ========== ###
# part b: set self.probabilities_ according to the training set
classes = np.unique(y);
self.probabilities_ = {};
total_number = y.shape[0]
for i in classes:
self.probabilities_[int(i)] = len(np.where(y == i)[0])/float(total_number)
### ========== TODO : END ========== ###
return self
def predict(self, X, seed=1234) :
"""
Predict class values.
Parameters
--------------------
X -- numpy array of shape (n,d), samples
seed -- integer, random seed
Returns
--------------------
y -- numpy array of shape (n,), predicted classes
"""
if self.probabilities_ is None :
raise Exception("Classifier not initialized. Perform a fit first.")
np.random.seed(seed)
### ========== TODO : START ========== ###
# part b: predict the class for each test example
# hint: use np.random.choice (be careful of the parameters)
#print(self.probabilities_)
n = X.shape[0]
y = np.random.choice(2, n, p=[self.probabilities_[0], self.probabilities_[1]])
### ========== TODO : END ========== ###
return y
######################################################################
# functions
######################################################################
def plot_histograms(X, y, Xnames, yname) :
n,d = X.shape # n = number of examples, d = number of features
fig = plt.figure(figsize=(20,15))
nrow = 3; ncol = 3
for i in range(d) :
        fig.add_subplot(3, 3, i+1)  # subplot indices are 1-based
data, bins, align, labels = plot_histogram(X[:,i], y, Xname=Xnames[i], yname=yname, show = False)
n, bins, patches = plt.hist(data, bins=bins, align=align, alpha=0.5, label=labels)
plt.xlabel(Xnames[i])
plt.ylabel('Frequency')
plt.legend() #plt.legend(loc='upper left')
plt.savefig ('histograms.pdf')
def plot_histogram(X, y, Xname, yname, show = True) :
"""
Plots histogram of values in X grouped by y.
Parameters
--------------------
X -- numpy array of shape (n,d), feature values
y -- numpy array of shape (n,), target classes
Xname -- string, name of feature
yname -- string, name of target
"""
# set up data for plotting
targets = sorted(set(y))
data = []; labels = []
for target in targets :
features = [X[i] for i in range(len(y)) if y[i] == target]
data.append(features)
labels.append('%s = %s' % (yname, target))
# set up histogram bins
features = set(X)
nfeatures = len(features)
test_range = list(range(int(math.floor(min(features))), int(math.ceil(max(features)))+1))
if nfeatures < 10 and sorted(features) == test_range:
bins = test_range + [test_range[-1] + 1] # add last bin
align = 'left'
else :
bins = 10
align = 'mid'
# plot
if show == True:
plt.figure()
n, bins, patches = plt.hist(data, bins=bins, align=align, alpha=0.5, label=labels)
plt.xlabel(Xname)
plt.ylabel('Frequency')
plt.legend() #plt.legend(loc='upper left')
#plt.show()
return data, bins, align, labels
def error(clf, X, y, ntrials=100, test_size=0.2) :
"""
Computes the classifier error over a random split of the data,
averaged over ntrials runs.
Parameters
--------------------
clf -- classifier
X -- numpy array of shape (n,d), features values
y -- numpy array of shape (n,), target classes
ntrials -- integer, number of trials
Returns
--------------------
train_error -- float, training error
test_error -- float, test error
"""
### ========== TODO : START ========== ###
# compute cross-validation error over ntrials
# hint: use train_test_split (be careful of the parameters)
train_error = 0
test_error = 0
for i in range(1, ntrials+1):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, train_size=0.8, random_state=i)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_train) # compute the training error
train_error += 1 - metrics.accuracy_score(y_train, y_pred, normalize=True)
y_pred = clf.predict(X_test) # compute the test error
test_error += 1 - metrics.accuracy_score(y_test, y_pred, normalize=True)
train_error = float(train_error) / ntrials
test_error = float(test_error) / ntrials # average the errors out
### ========== TODO : END ========== ###
return train_error, test_error
def write_predictions(y_pred, filename, yname=None) :
"""Write out predictions to csv file."""
out = open(filename, 'wb')
f = csv.writer(out)
if yname :
f.writerow([yname])
f.writerows(list(zip(y_pred)))
out.close()
######################################################################
# main
######################################################################
def main():
# load Titanic dataset
titanic = load_data("titanic_train.csv", header=1, predict_col=0)
X = titanic.X; Xnames = titanic.Xnames
y = titanic.y; yname = titanic.yname
n,d = X.shape # n = number of examples, d = number of features
#========================================
# part a: plot histograms of each feature
print('Plotting...')
for i in range(d) :
plot_histogram(X[:,i], y, Xname=Xnames[i], yname=yname)
plt.close('all')
#========================================
# train Majority Vote classifier on data
print('Classifying using Majority Vote...')
clf = MajorityVoteClassifier() # create MajorityVote classifier, which includes all model parameters
clf.fit(X, y) # fit training data using the classifier
y_pred = clf.predict(X) # take the classifier and run it on the training data
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error: %.3f' % train_error)
majority_vote_error = train_error
### ========== TODO : START ========== ###
# part b: evaluate training error of Random classifier
print('Classifying using Random...')
rand_clf = RandomClassifier()
rand_clf.fit(X, y)
y_pred = rand_clf.predict(X)
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error: %.3f' % train_error)
random_clf_error = train_error;
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part c: evaluate training error of Decision Tree classifier
# use criterion of "entropy" for Information gain
print('Classifying using Decision Tree...')
decision_tree_clf = DecisionTreeClassifier(criterion="entropy")
decision_tree_clf.fit(X, y)
y_pred = decision_tree_clf.predict(X)
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error: %.3f' % train_error)
### ========== TODO : END ========== ###
# note: uncomment out the following lines to output the Decision Tree graph
"""
# save the classifier -- requires GraphViz and pydot
import StringIO, pydot
from sklearn import tree
dot_data = StringIO.StringIO()
tree.export_graphviz(clf, out_file=dot_data,
feature_names=Xnames)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("dtree.pdf")
"""
### ========== TODO : START ========== ###
# part d: evaluate training error of k-Nearest Neighbors classifier
# use k = 3, 5, 7 for n_neighbors
print('Classifying using k-Nearest Neighbors...')
for k in (3, 5, 7):
k_Nearest_clf = KNeighborsClassifier(n_neighbors=k)
k_Nearest_clf.fit(X, y)
y_pred = k_Nearest_clf.predict(X)
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error (k = %d): %.3f' % (k, train_error))
# Redeclare it for part e
k_Nearest_clf = KNeighborsClassifier(n_neighbors=5)
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part e: use cross-validation to compute average training and test error of classifiers
print('Investigating various classifiers...')
for classifier in (clf, rand_clf, decision_tree_clf, k_Nearest_clf):
train_error, test_error = error(classifier, X, y)
        print('\t-- Train Error: %.3f  Test Error: %.3f  Classifier: %s' % (train_error, test_error, classifier.__class__.__name__))
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part f: use 10-fold cross-validation to find the best value of k for k-Nearest Neighbors classifier
print('Finding the best k for KNeighbors classifier...')
x_points = []
y_points = []
for k in range(1, 50, 2):
x_points.append(k)
k_Nearest_clf = KNeighborsClassifier(n_neighbors=k)
k_y = 1 - cross_val_score(k_Nearest_clf, X, y, scoring='accuracy', cv=10)
y_points.append(sum(k_y) / len(k_y))
plt.plot(x_points, y_points)
plt.xlabel('Number of neighbors')
plt.ylabel('Average Error')
plt.show()
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part g: investigate decision tree classifier with various depths
print('Investigating depths...')
x_points = []
y_test_points = []
y_train_points = []
for k in range(1, 21):
decision_tree_clf = DecisionTreeClassifier(criterion='entropy', max_depth=k)
train_error, test_error = error(decision_tree_clf, X, y)
x_points.append(k)
y_test_points.append(test_error)
y_train_points.append(train_error)
plt.plot(x_points, y_train_points, label='Training Error')
plt.plot(x_points, y_test_points, label='Test Error')
plt.plot(x_points, [majority_vote_error] * len(x_points), label='Majority Vote Classifier error')
plt.plot(x_points, [random_clf_error] * len(x_points), label='Random Classifier error')
plt.legend(loc='upper right')
plt.xlabel('Depth')
plt.ylabel('Average Error')
plt.ylim(ymax=0.7)
plt.show()
#plt.close('all')
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part h: investigate Decision Tree and k-Nearest Neighbors classifier with various training set sizes
print('Investigating training set sizes...')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, train_size=0.9, random_state=42)
fraction_percentages = [x / 10.0 for x in range(1, 11)]
fraction_indices = [int(i * X_train.shape[0]) for i in fraction_percentages]
k_Nearest_clf = KNeighborsClassifier(n_neighbors=7)
decision_tree_clf = DecisionTreeClassifier(criterion='entropy', max_depth=6)
x_points = fraction_percentages[:]
y_points_decision_train = []
y_points_knn_train = []
y_points_decision_test =[]
y_points_knn_test = []
for end_index in fraction_indices:
X_train_set = X_train[:end_index+1]
y_train_set = y_train[:end_index+1]
k_Nearest_clf.fit(X_train_set, y_train_set)
decision_tree_clf.fit(X_train_set, y_train_set)
y_pred_knn_train = k_Nearest_clf.predict(X_train_set)
y_pred_decision_train = decision_tree_clf.predict(X_train_set)
y_pred_knn_test = k_Nearest_clf.predict(X_test)
y_pred_decision_test = decision_tree_clf.predict(X_test)
train_error_knn = 1 - metrics.accuracy_score(y_train_set, y_pred_knn_train, normalize=True)
test_error_knn = 1 - metrics.accuracy_score(y_test, y_pred_knn_test, normalize=True)
train_error_decision = 1 - metrics.accuracy_score(y_train_set, y_pred_decision_train, normalize=True)
test_error_decision = 1 - metrics.accuracy_score(y_test, y_pred_decision_test, normalize=True)
y_points_decision_train.append(train_error_decision)
y_points_decision_test.append(test_error_decision)
y_points_knn_train.append(train_error_knn)
y_points_knn_test.append(test_error_knn)
plt.plot(x_points, y_points_decision_train, label="Decision Tree Training Error")
plt.plot(x_points, y_points_decision_test, label="Decision Tree Test Error")
plt.plot(x_points, y_points_knn_train, label="KNearest Training Error")
plt.plot(x_points, y_points_knn_test, label="KNearest Test Error")
plt.plot(x_points, [majority_vote_error] * len(x_points), label='Majority Vote Classifier error')
plt.plot(x_points, [random_clf_error] * len(x_points), label='Random Classifier error')
plt.ylim(ymax=0.8)
plt.legend(loc='upper right')
plt.xlabel('Fraction of Training Data')
plt.ylabel('Error')
plt.show()
### ========== TODO : END ========== ###
print('Done')
if __name__ == "__main__":
main()
[record] previous file author: atibhav.mittal6@gmail.com
[record] next file: /.history/save_20181129231105.py | repo: Symfomany/keras | license: no_license | Python, 2,777 bytes
import os, argparse
import tensorflow as tf
# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_node_names):
"""Extract the sub graph defined by the output nodes and convert
    all of its variables into constants.
Args:
model_dir: the root folder containing the checkpoint state file
output_node_names: a string, containing all the output node's names,
comma separated
"""
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
"directory: %s" % model_dir)
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
    # We build the full filename of our frozen graph
absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_dir + "/frozen_model.pb"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
# We start a session using a temporary fresh Graph
with tf.Session(graph=tf.Graph()) as sess:
# We import the meta graph in the current default Graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We restore the weights
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names.split(",") # The output node names are used to select the usefull nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
return output_graph_def
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default="models", help="Model folder to export")
parser.add_argument("--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.")
args = parser.parse_args()
freeze_graph(args.model_dir, args.output_node_names)
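# Usage sketch (editor's note): assuming TensorFlow 1.x and that this script is saved as
# freeze.py (the script name and the node name below are examples, not taken from the repo):
#
#   python freeze.py --model_dir=models --output_node_names=output/Softmax
#
# The resulting frozen_model.pb can then be loaded back into a graph like this:
def load_frozen_graph(pb_path):
    """Load a frozen GraphDef produced by freeze_graph() into a fresh tf.Graph."""
    with tf.gfile.GFile(pb_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="")
    return graph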
[record] previous file author: julien@meetserious.com
[record] next file: /version.py | repo: ryansturmer/gitmake | license: MIT | Python, 62 bytes
major = 0
minor = 0
patch = 0
branch = "dev"
timestamp = 1376801630.58
[record] previous file author: ryansturmer@gmail.com
[record] next file: /api_client/test_helper.py | repo: uktrade/lite-tests-common | license: MIT | Python, 2,474 bytes
from .sub_helpers.documents import Documents
from .sub_helpers.applications import Applications
from .sub_helpers.cases import Cases
from .sub_helpers.document_templates import DocumentTemplates
from .sub_helpers.ecju_queries import EcjuQueries
from .sub_helpers.flags import Flags
from .sub_helpers.goods import Goods
from .sub_helpers.goods_queries import GoodsQueries
from .sub_helpers.organisations import Organisations
from .sub_helpers.ogel import Ogel
from .sub_helpers.parties import Parties
from .sub_helpers.picklists import Picklists
from .sub_helpers.queues import Queues
from .sub_helpers.users import Users
class TestHelper:
"""
Contains a collection of test helper classes, grouped by functional area, with each class containing
required logic wrapping calls to various LITE API endpoints.
"""
def __init__(self, api):
self.api_client = api
self.context = self.api_client.context
request_data = self.api_client.request_data
self.documents = Documents(api_client=self.api_client, request_data=request_data)
self.users = Users(api_client=self.api_client, request_data=request_data)
self.organisations = Organisations(api_client=self.api_client, request_data=request_data)
self.goods = Goods(api_client=self.api_client, documents=self.documents, request_data=request_data)
self.goods_queries = GoodsQueries(api_client=self.api_client, request_data=request_data)
self.parties = Parties(api_client=self.api_client, documents=self.documents, request_data=request_data)
self.ecju_queries = EcjuQueries(api_client=self.api_client, request_data=request_data)
self.picklists = Picklists(api_client=self.api_client, request_data=request_data)
self.ogel = Ogel(api_client=self.api_client, request_data=request_data)
self.cases = Cases(api_client=self.api_client, request_data=request_data)
self.flags = Flags(api_client=self.api_client, request_data=request_data)
self.queues = Queues(api_client=self.api_client, request_data=request_data)
self.document_templates = DocumentTemplates(api_client=self.api_client, request_data=request_data)
self.applications = Applications(
parties=self.parties,
goods=self.goods,
api_client=self.api_client,
documents=self.documents,
request_data=request_data,
organisations=self.organisations,
)
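# Usage sketch (editor's note): callers are assumed to build an API client object exposing
# `.context` and `.request_data` (as read in __init__ above) and wrap it once per test session.
# The client class name and attribute access below are hypothetical illustrations only.
#
#   api_client = ApiClient(base_url, credentials)   # hypothetical client construction
#   helper = TestHelper(api_client)
#   organisations = helper.organisations            # every sub-helper shares the same client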
[record] previous file author: uktrade.noreply@github.com
[record] next file: /products/views.py | repo: anshika-1999/ecommerce_project | license: no_license | Python, 686 bytes
from django.shortcuts import render
from django.http import HttpResponse
from .models import Product
def productsHome(request):
allProds = []
catprods = Product.objects.values('category', 'product_id')
cats = {item['category'] for item in catprods}
for cat in cats:
prod = Product.objects.filter(category=cat)
allProds.append(prod)
params = {'allProds':allProds}
return render(request, 'products/productHome.html',params)
def home(request):
products = Product.objects.all()
params={'product':products}
return render(request, 'products/home.html',params)
def checkout(request):
return render(request, 'products/checkout.html')
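# A minimal urls.py sketch (editor's note) showing one way these views could be routed; the
# URL patterns and names below are assumptions for illustration, not part of this repo.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.home, name='home'),
#     path('products/', views.productsHome, name='products_home'),
#     path('checkout/', views.checkout, name='checkout'),
# ]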
[record] previous file author: anshikag.1999@gmail.com
[record] next file: /leetcode/python/easy/p645_findErrorNums.py | repo: kefirzhang/algorithms | license: Apache-2.0 | Python, 397 bytes
class Solution:
def findErrorNums(self, nums):
helper = [0] * len(nums)
for i in nums:
helper[i - 1] += 1
for i, n in enumerate(helper):
            # print(i, n)  # debug output left in the original; disabled here
if n == 0:
lack = i + 1
elif n == 2:
more = i + 1
return [more, lack]
slu = Solution()
print(slu.findErrorNums([1, 2, 2, 4]))
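# An equivalent arithmetic approach (editor's note, minimal sketch): for numbers 1..n with one
# value duplicated and one missing, the duplicate is sum(nums) - sum(set(nums)) and the missing
# value is n*(n+1)//2 - sum(set(nums)).
def find_error_nums_by_sum(nums):
    n = len(nums)
    distinct_sum = sum(set(nums))
    duplicate = sum(nums) - distinct_sum
    missing = n * (n + 1) // 2 - distinct_sum
    return [duplicate, missing]

print(find_error_nums_by_sum([1, 2, 2, 4]))  # expected [2, 3]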
[record] previous file author: 8390671@qq.com
[record] next file: /app/app/settings.py | repo: raiatul14/taxi-app | license: no_license | Python, 4,226 bytes
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import datetime
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 0)))
ALLOWED_HOSTS = []
ALLOWED_HOSTS.extend(
filter(
None,
os.environ.get('ALLOWED_HOSTS', '').split(','),
)
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
ASGI_APPLICATION = 'taxi.routing.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
#REDIS
REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:6379')
#DJANGO CHANNELS
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [REDIS_URL],
},
},
}
#REST FRAMEWORK
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.SessionAuthentication'
)
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': datetime.timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': datetime.timedelta(days=1),
'USER_ID_CLAIM': 'id',
}
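# Editor's note: a sketch of the environment variables this settings module reads (see the
# os.environ lookups above). The values are local-development placeholders, not project values:
#
#   SECRET_KEY=dev-secret-key
#   DEBUG=1
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   DB_HOST=localhost
#   DB_NAME=app
#   DB_USER=postgres
#   DB_PASS=postgres
#   REDIS_URL=redis://localhost:6379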
[record] previous file author: atul.rai@ajackus.com
[record] next file: /hyperf-skeleton/h5/node_modules/utf-8-validate/build/config.gypi | repo: 4yop/miscellaneous | license: MIT | 3,345 bytes, extension gypi
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": [],
"msbuild_toolset": "v141",
"msvs_windows_target_platform_version": "10.0.17763.0"
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt65l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "65",
"is_debug": 0,
"napi_build_version": "5",
"nasm_version": "2.14",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_large_pages": "false",
"node_use_large_pages_script_lld": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "so.72",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\Administrator\\AppData\\Local\\node-gyp\\Cache\\12.16.2",
"standalone_static_library": 1,
"msbuild_path": "D:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe",
"cache": "C:\\Users\\Administrator\\AppData\\Local\\npm-cache",
"globalconfig": "C:\\Users\\Administrator\\AppData\\Roaming\\npm\\etc\\npmrc",
"init_module": "C:\\Users\\Administrator\\.npm-init.js",
"metrics_registry": "https://registry.npm.taobao.org/",
"node_gyp": "C:\\Users\\Administrator\\AppData\\Roaming\\npm\\node_modules\\npm\\node_modules\\node-gyp\\bin\\node-gyp.js",
"prefix": "C:\\Users\\Administrator\\AppData\\Roaming\\npm",
"registry": "https://registry.npm.taobao.org/",
"userconfig": "C:\\Users\\Administrator\\.npmrc",
"user_agent": "npm/7.7.6 node/v12.16.2 win32 x64"
}
}
[record] previous file author: 1131559748@qq.com
[record] next file: /config_sz32_alpha1_5.py | repo: aizvorski/ndsb17 | license: no_license | Python, 91 bytes
num_epochs = 500
samples_per_epoch = 10000
lr = 0.001
feature_sz = 32
feature_alpha = 1.5
[record] previous file author: aizvorski@gmail.com
[record] next file: /sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/aio/operations/_backup_protection_containers_operations.py | repo: kurtzeborn/azure-sdk-for-python | licenses: MIT, LicenseRef-scancode-generic-cla, LGPL-2.1-or-later | Python, 6,974 bytes
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._backup_protection_containers_operations import build_list_request
from .._vendor import RecoveryServicesBackupClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BackupProtectionContainersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.aio.RecoveryServicesBackupClient`'s
:attr:`backup_protection_containers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, vault_name: str, resource_group_name: str, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.ProtectionContainerResource"]:
"""Lists the containers registered to Recovery Services Vault.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param filter: OData filter options. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProtectionContainerResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2023-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ProtectionContainerResourceList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProtectionContainerResourceList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupProtectionContainers"
}
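# Usage sketch (editor's note): this module is generated, so the snippet below only illustrates
# the usual async management-client pattern; the constructor arguments and credential class are
# assumptions based on that pattern, not taken from this file.
#
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.recoveryservicesbackup.activestamp.aio import RecoveryServicesBackupClient
#
# async def print_containers(subscription_id, vault_name, resource_group_name):
#     async with DefaultAzureCredential() as credential:
#         client = RecoveryServicesBackupClient(credential, subscription_id)
#         async for container in client.backup_protection_containers.list(
#             vault_name=vault_name, resource_group_name=resource_group_name
#         ):
#             print(container.name)
#         await client.close()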
[record] previous file author: kurtzeborn.noreply@github.com
[record] next file: /Second_Processing_app/temboo/Library/Google/Drive/Revisions/Delete.py | repo: dattasaurabh82/Final_thesis | license: no_license | Python, 3,974 bytes
# -*- coding: utf-8 -*-
###############################################################################
#
# Delete
# Removes a revision.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Delete(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Delete Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Google/Drive/Revisions/Delete')
def new_input_set(self):
return DeleteInputSet()
def _make_result_set(self, result, path):
return DeleteResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteChoreographyExecution(session, exec_id, path)
class DeleteInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Delete
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientSecret', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
"""
InputSet._set_input(self, 'FileID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'RefreshToken', value)
def set_RevisionID(self, value):
"""
Set the value of the RevisionID input for this Choreo. ((required, string) The ID of the revision.)
"""
InputSet._set_input(self, 'RevisionID', value)
class DeleteResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Delete Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class DeleteChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteResultSet(response, path)
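# Usage sketch (editor's note): the session construction and execute_with_results() call below
# follow the usual Temboo SDK pattern and are assumptions, not taken from this generated file.
#
# from temboo.core.session import TembooSession
# session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
# choreo = Delete(session)
# inputs = choreo.new_input_set()
# inputs.set_AccessToken("ACCESS_TOKEN")
# inputs.set_FileID("FILE_ID")
# inputs.set_RevisionID("REVISION_ID")
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())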
[record] previous file author: dattasaurabh82@gmail.com
[record] next file: /test_all.py | repo: lijiahaoAA/lijiahao_cpsc_12lead | license: no_license | Python, 6,924 bytes
import time
import numpy as np
np.set_printoptions(threshold=np.inf)
import scipy.io as sio
import os
import config
from keras.preprocessing import sequence
import QRSDetectorOffline
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # so Chinese characters in labels render correctly
plt.rcParams['axes.unicode_minus'] = False  # so minus signs render correctly with this font
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = '0'
# config = config.Config()
# a = os.listdir(config.train_mat_path)
# train_mat = [] # holds all 6877 training samples
# for i in range(len(a)):
# if a[i].endswith('.mat'):
# train_mat.append(config.train_mat_path + a[i])
#
# b = os.listdir(config.test_mat_path)
# test_mat = [] # holds the final test data
# for i in range(len(b)):
# if b[i].endswith('.mat'):
# test_mat.append(config.test_mat_path + b[i])
#
# def data_process(all_mat):
# ECG_1 = []
# ECG_2 = []
# ECG_3 = []
# #for recordpath in range(len(all_mat)):
# for recordpath in range(1):
# # load ECG
# mat = sio.loadmat(all_mat[recordpath])
# mat = np.array(mat['ECG']['data'][0, 0])
# mat = np.transpose(mat) # transpose
# signal = mat
# ECG_1.append(signal)
# #print(signal.shape)
#
# qrsdetector = QRSDetectorOffline.QRSDetectorOffline(signal, config.sample_frequency, verbose=False,
# plot_data=False, show_plot=False)
# # denoise ECG: band-pass filter each of the 12 leads
# for i in range(signal.shape[1]):
# signal[:, i] = qrsdetector.bandpass_filter(signal[:, i], lowcut=0.5, highcut=49.0,
# signal_freq=config.sample_frequency, filter_order=1)
#
# ECG_2.append(signal)
# # print(ECG[0].shape)
# # print(ECG[0])
# # print(signal)
# # pad all leads to the same length, zero-padding at the end
# # ECG_1 = sequence.pad_sequences(ECG_1, maxlen=3600, dtype='float32', truncating='post')
# ECG_2 = sequence.pad_sequences(ECG_2, maxlen=3600, dtype='float32', truncating='post')
# print(len(ECG_1))
# print(len(ECG_2))
# # plot_wave(ECG_1[0][:,0],ECG_2[0][:,0])
# calculate_max_min(ECG_2,ECG_1[0][:,0],ECG_2[0][:,0])
#
# #np.save('ECG_train_data_process_no_wave.npy', ECG)
# # np.save('ECG_train_data_process_3600QRS.npy', ECG)
# #np.save('ECG_test_data_process_no_wave.npy', ECG)
# # np.save('ECG_test_data_process_3600QRS.npy', ECG)
# return ECG_1, ECG_2
#
# def calculate_max_min(ECG,ECG_1,ECG_2):
# data = []
# tic = time.time()
# for i in range(len(ECG)):
# data.append(max(ECG[i][:, 0]))
# data.append(min(ECG[i][:, 0]))
#
# data.append(max(ECG[i][:, 1]))
# data.append(min(ECG[i][:, 1]))
#
# data.append(max(ECG[i][:, 2]))
# data.append(min(ECG[i][:, 2]))
#
# data.append(max(ECG[i][:, 3]))
# data.append(min(ECG[i][:, 3]))
#
# data.append(max(ECG[i][:, 4]))
# data.append(min(ECG[i][:, 4]))
#
# data.append(max(ECG[i][:, 5]))
# data.append(min(ECG[i][:, 5]))
#
# data.append(max(ECG[i][:, 6]))
# data.append(min(ECG[i][:, 6]))
#
# data.append(max(ECG[i][:, 7]))
# data.append(min(ECG[i][:, 7]))
#
# data.append(max(ECG[i][:, 8]))
# data.append(min(ECG[i][:, 8]))
#
# data.append(max(ECG[i][:, 9]))
# data.append(min(ECG[i][:, 9]))
#
# data.append(max(ECG[i][:, 10]))
# data.append(min(ECG[i][:, 10]))
#
# data.append(max(ECG[i][:, 11]))
# data.append(min(ECG[i][:, 11]))
#
# # print(len(data))
# with open("2.txt", 'w') as file:
# data1 = str(data)
# file.write(data1)
# file.close()
# max_data = max(data) # when normalizing to a fixed range, the training and test sets must use the same max_data and min_data
# min_data = min(data)
# normalization(ECG, config.max_data, config.min_data, ECG_1, ECG_2)
# print(max(data))
# print(min(data))
# toc = time.time()
# print("data normalization takes time:", toc - tic)
# return max_data,min_data
#
# # normalize the data into the specified range
# def normalization(ECG, max_data, min_data, ECG_1, ECG_2):
# if(max_data - min_data == 0):
# print("分母为零,请检查")
# return
# k = (config.normalization_max - config.normalization_min)/((max_data - min_data) * 1.0) # scale factor
# for i in range(len(ECG)):
# ECG[i][:, 0] = config.normalization_min + k * (ECG[i][:, 0] - min_data)
# ECG[i][:, 1] = config.normalization_min + k * (ECG[i][:, 1] - min_data)
# ECG[i][:, 2] = config.normalization_min + k * (ECG[i][:, 2] - min_data)
# ECG[i][:, 3] = config.normalization_min + k * (ECG[i][:, 3] - min_data)
# ECG[i][:, 4] = config.normalization_min + k * (ECG[i][:, 4] - min_data)
# ECG[i][:, 5] = config.normalization_min + k * (ECG[i][:, 5] - min_data)
# ECG[i][:, 6] = config.normalization_min + k * (ECG[i][:, 6] - min_data)
# ECG[i][:, 7] = config.normalization_min + k * (ECG[i][:, 7] - min_data)
# ECG[i][:, 8] = config.normalization_min + k * (ECG[i][:, 8] - min_data)
# ECG[i][:, 9] = config.normalization_min + k * (ECG[i][:, 9] - min_data)
# ECG[i][:, 10] = config.normalization_min + k * (ECG[i][:, 10] - min_data)
# ECG[i][:, 11] = config.normalization_min + k * (ECG[i][:, 11] - min_data)
#
# # np.save('ECG_train_data_normal.npy', ECG)
# # np.save('ECG_test_data_normal_500record.npy', ECG)
# plot_wave(ECG_1,ECG_2,ECG[0][:,0])
# return ECG
#
# def plot_wave(ECG_qrs, ECG_noqrs, ECG_3):
# plt.figure()
# print(len(ECG_qrs.shape))
# print(len(ECG_noqrs.shape))
# print(len(ECG_3.shape))
#
# plt.plot(range(3600), ECG_qrs[0:3600], color="red", label="denoised data")
# # .plot(range(3600), ECG_noqrs, color="blue")
#
# plt.plot(range(3600), ECG_3, color="blue", label="normalized data")
# plt.title("去噪数据波形对比归一化到[-3,3]数据波形")
# plt.xlabel("Time")
# plt.ylabel("Voltage")
# plt.legend(loc="best")
# plt.show()
#
# #data_process(train_mat)
# data_process(test_mat)
# from keras import backend as K
# from keras.layers import Lambda
# import tensorflow as tf
# def zeropad(x):
# y = K.zeros_like(x)
# print(y)
# return K.concatenate([x, y], axis=2)
#
def zeropad_output_shape(input_shape):
print(input_shape)
shape = list(input_shape)
shape[1] *= 2
print(shape)
return tuple(shape)
input = np.array([[1,2,3],[4,5,6]])
y = np.zeros_like(input)
new = np.concatenate([input, y], axis=1)
print(new)
zeropad_output_shape(input.shape)
# input = tf.convert_to_tensor(input)
# shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(input)
# print(shortcut)
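# Editor's note: the commented-out normalization() above rescales each of the 12 leads to
# [normalization_min, normalization_max] one column at a time. A vectorized NumPy sketch of the
# same linear mapping (the default bounds here are illustrative):
def normalize_leads(ecg, data_min, data_max, out_min=-3.0, out_max=3.0):
    """Linearly map ECG lead values from [data_min, data_max] to [out_min, out_max]."""
    if data_max == data_min:
        raise ValueError("data_max and data_min must differ")
    k = (out_max - out_min) / float(data_max - data_min)
    return out_min + k * (np.asarray(ecg, dtype=float) - data_min)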
[record] previous file author: 1301840357@qq.com
[record] next file: /backend/api.py | repo: AkshithBellare/maljpeg-web-app | license: no_license | Python, 5,824 bytes
import os
import pickle
from flask import Flask
from flask_restful import Resource, Api, reqparse
from flask_cors import CORS
from flask import request, Response
import numpy as np
import json
import PIL.Image as Image
import io
import base64
from struct import unpack
import pandas as pd
import sys
import glob
marker_mapping = {
0xffc0: "SOF0",
0xffc1: "SOF1",
0xffc2: "SOF2",
0xffc3: "SOF3",
0xffc4: "DHT",
0xffc5: "SOF5",
0xffc6: "SOF6",
0xffc7: "SOF7",
0xffc8: "JPG",
0xffc9: "SOF9",
0xffca: "SOF10",
0xffcb: "SOF11",
0xffcc: "DAC",
0xffcd: "SOF13",
0xffce: "SOF14",
0xffcf: "SOF15",
0xffd0: "RST0",
0xffd1: "RST1",
0xffd2: "RST2",
0xffd3: "RST3",
0xffd4: "RST4",
0xffd5: "RST5",
0xffd6: "RST6",
0xffd7: "RST7",
0xffd8: "SOI",
0xffd9: "EOI",
0xffda: "SOS",
0xffdb: "DQT",
0xffdc: "DNL",
0xffdd: "DRI",
0xffde: "DHP",
0xffdf: "EXP",
0xffe0: "APP0",
0xffe1: "APP1",
0xffe2: "APP2",
0xffe3: "APP3",
0xffe4: "APP4",
0xffe5: "APP5",
0xffe6: "APP6",
0xffe7: "APP7",
0xffe8: "APP8",
0xffe9: "APP9",
0xffea: "APP10",
0xffeb: "APP11",
0xffec: "APP12",
0xffed: "APP13",
0xffee: "APP14",
0xffef: "APP15",
0xfff0: "JPG0",
0xfff1: "JPG1",
0xfff2: "JPG2",
0xfff3: "JPG3",
0xfff4: "JPG4",
0xfff5: "JPG5",
0xfff6: "JPG6",
0xfff7: "JPG7",
0xfff8: "JPG8",
0xfff9: "JPG9",
0xfffa: "JPG10",
0xfffb: "JPG11",
0xfffc: "JPG12",
0xfffd: "JPG13",
0xfffe: "COM",
0xff01: "TEM",
}
class JPEG:
def __init__(self, image_file):
with open(image_file, 'rb') as f:
self.img_data = f.read()
def decode(self):
data = self.img_data
marker_DQT_num = 0
marker_DQT_size_max = 0
marker_DHT_num = 0
marker_DHT_size_max = 0
file_markers_num = 0
marker_EOI_content_after_num = 0
marker_APP12_size_max = 0
marker_APP1_size_max = 0
marker_COM_size_max = 0
file_size = len(data)
print(f"file_size = {file_size}")
while(True):
try:
marker, = unpack(">H", data[0:2])
except:
print("error")
marker_map = marker_mapping.get(marker)
if marker_map != None:
file_markers_num += 1
if marker_map == "DQT":
marker_DQT_num += 1
lenchunk, = unpack(">H", data[2:4])
if lenchunk > marker_DQT_size_max:
marker_DQT_size_max = lenchunk
data = data[2+lenchunk:]
elif marker_map == "SOI":
data = data[2:]
elif marker_map == "DHT":
marker_DHT_num += 1
lenchunk, = unpack(">H", data[2:4])
if lenchunk > marker_DHT_size_max:
marker_DHT_size_max = lenchunk
data = data[2+lenchunk:]
elif marker_map == "EOI":
rem = data[2:]
if len(rem) > marker_EOI_content_after_num:
marker_EOI_content_after_num = len(rem)
data = rem
elif marker_map == "SOS":
data = data[-2:]
elif marker_map == "APP12":
lenchunk, = unpack(">H", data[2:4])
if lenchunk > marker_APP12_size_max:
marker_APP12_size_max = lenchunk
data = data[2+lenchunk:]
elif marker_map == "APP1":
lenchunk, = unpack(">H", data[2:4])
if lenchunk > marker_APP1_size_max:
marker_APP1_size_max = lenchunk
data = data[2+lenchunk:]
elif marker_map == "COM":
lenchunk, = unpack(">H", data[2:4])
if lenchunk > marker_COM_size_max:
marker_COM_size_max = lenchunk
data = data[2+lenchunk:]
elif marker_map == "TEM":
data = data[2:]
elif marker <= 0xffd9 and marker >= 0xffd0:
data = data[2:]
elif marker <= 0xffbf and marker >= 0xff02:
lenchunk, = unpack(">H", data[2:4])
data = data[2+lenchunk:]
else:
lenchunk, = unpack(">H", data[2:4])
data = data[2+lenchunk:]
else:
data = data[1:]
if (len(data) == 0):
data_list = [marker_EOI_content_after_num,marker_DQT_num,marker_DHT_num,file_markers_num, marker_DQT_size_max, marker_DHT_size_max,file_size, marker_COM_size_max,marker_APP1_size_max,marker_APP12_size_max,0]
return data_list
def extract_features():
img = JPEG("./server_files/saveimg.jpeg")
data_list = img.decode()
df = pd.DataFrame(data_list)
df = df.T
df.to_csv("test.csv")
app = Flask(__name__)
CORS(app)
api = Api(app)
parser = reqparse.RequestParser()
parser.add_argument("image")
class Predict(Resource):
def post(self):
args = parser.parse_args()
#request_data = json.loads(request.get_data())
#data = request_data['data']
#decodeit = open('saveimg.jpeg', 'wb')
#decodeit.write(base64.b64decode((data)))
#decodeit.close()
#print(type(data))
decodeit = open('./server_files/saveimg.jpeg', 'wb')
decodeit.write(base64.b64decode(bytes(args["image"], 'utf-8')))
decodeit.close()
extract_features()
return {"class" : "bening"}
api.add_resource(Predict, "/predict")
if __name__ == "__main__":
app.run(debug=True)
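# Client-side usage sketch (editor's note): send a base64-encoded JPEG to the /predict endpoint.
# The host and port assume the Flask development server started above; the file name is an example.
#
# import base64, requests
# with open("sample.jpeg", "rb") as f:
#     payload = {"image": base64.b64encode(f.read()).decode("utf-8")}
# resp = requests.post("http://127.0.0.1:5000/predict", json=payload)
# print(resp.json())   # e.g. {"class": "benign"}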
[record] previous file author: akshithnm@gmail.com
[record] next file: /zygoat/components/__init__.py | repo: swang192/zygoat | license: MIT | Python, 320 bytes
from .base import Component # noqa
from .settings_component import SettingsComponent # noqa
from .editorconfig import editorconfig
from .docker_compose import docker_compose
from .backend import backend
from .frontend import frontend
components = [
editorconfig,
docker_compose,
backend,
frontend,
]
[record] previous file author: markrawls96@gmail.com
[record] next file: /Python-Programming-Intermediate/Regular Expressions-164.py | repo: CloudChaoszero/Data-Analyst-Track-Dataquest.io-Projects | license: no_license | Python, 2,873 bytes
## 1. Introduction ##
strings = ["data science", "big data", "metadata"]
regex = "data"
## 2. Wildcards in Regular Expressions ##
strings = ["bat", "robotics", "megabyte"]
regex = "..t"
## 3. Searching the Beginnings And Endings Of Strings ##
strings = ["better not put too much", "butter in the", "batter"]
bad_string = "We also wouldn't want it to be bitter"
regex = ""
regex = "^b.tter"
## 5. Reading and Printing the Data Set ##
import csv
# Open and read the file, then convert it to a list
file = csv.reader(open("askreddit_2015.csv",'r'))
post_with_header = list(file)
posts = post_with_header[1:]
for val in posts[:10]:
print(val)
## 6. Counting Simple Matches in the Data Set with re() ##
import re
#Initialize Counter
of_reddit_count = 0
#Counting loop that counts for "of Reddit" in first element of every row
for val in posts:
if re.search("of Reddit", val[0]):
of_reddit_count +=1
else:
pass
## 7. Using Square Brackets to Match Multiple Characters ##
import re
of_reddit_count = 0
for row in posts:
if re.search("of [Rr]eddit", row[0]) is not None:
of_reddit_count += 1
## 8. Escaping Special Characters ##
import re
serious_count = 0
for row in posts:
if re.search("\[Serious]",row[0]) is not None:
serious_count +=1
print(row[0])
## 9. Combining Escaped Characters and Multiple Matches ##
import re
serious_count = 0
for row in posts:
if re.search("\[[sS]erious\]", row[0]) is not None:
serious_count += 1
## 10. Adding More Complexity to Your Regular Expression ##
import re
serious_count = 0
for row in posts:
if re.search("[\[\(][Ss]erious[\]\)]", row[0]) is not None:
serious_count += 1
## 11. Combining Multiple Regular Expressions ##
import re
serious_start_count = 0
serious_end_count = 0
serious_count_final = 0
for row in posts:
if re.search("^[\[\(][Ss]erious[\]\)]",row[0]) is not None:
serious_start_count += 1
if re.search("[\[\(][Ss]erious[\]\)]$", row[0]) is not None:
serious_end_count += 1
if re.search("^[\[\(][Ss]erious[\]\)]|[\[\(][Ss]erious[\]\)]$", row[0]) is not None:
serious_count_final += 1
## 12. Using Regular Expressions to Substitute Strings ##
import re
posts_new = []
for row in posts:
row[0] = re.sub("[\[\(][Ss]erious[\]\)]", "[Serious]", row[0])
posts_new.append(row)
## 13. Matching Years with Regular Expressions ##
import re
year_strings = []
for string in strings:
if re.search("[1-2][0-9][0-9][0-9]", string) is not None:
year_strings.append(string)
## 14. Repeating Characters in Regular Expressions ##
import re
year_strings = []
for y in strings:
if re.search("[0-2][0-9]{3}",y) is not None:
year_strings.append(y)
## 15. Challenge: Extracting all Years ##
import re
years = re.findall("[0-2][0-9]{3}", years_string)
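# Editor's note: `strings` (step 13) and `years_string` (step 15) are supplied by the Dataquest
# environment; sample values like the ones below would make the last steps runnable on their own
# (the platform's actual data is not reproduced here).
#
# strings = ["War of 1812", "There are 5280 feet to a mile", "Happy New Year 2016!"]
# years_string = "2015 was a good year, but 2016 will be better!"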
[record] previous file author: CloudChaoszero.noreply@github.com
[record] next file: /AGE/link_pred_ddi.py | repo: chuanqichen/cs224w | license: no_license | Python, 11,490 bytes
from __future__ import division
from __future__ import print_function
import os, sys
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# For replicating the experiments
SEED = 42
import argparse
import time
import random
import numpy as np
import scipy.sparse as sp
import torch
np.random.seed(SEED)
torch.manual_seed(SEED)
from torch import optim
import torch.nn.functional as F
from model import LinTrans, LogReg
from optimizer import loss_function
from utils import *
from sklearn.cluster import SpectralClustering, KMeans
from clustering_metric import clustering_metrics
from tqdm import tqdm
from sklearn.preprocessing import normalize, MinMaxScaler
from sklearn import metrics
import matplotlib.pyplot as plt
from ogb.linkproppred import PygLinkPropPredDataset, Evaluator
import torch_geometric.transforms as T
parser = argparse.ArgumentParser()
parser.add_argument('--gnnlayers', type=int, default=1, help="Number of gnn layers")
parser.add_argument('--linlayers', type=int, default=1, help="Number of hidden layers")
parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')
parser.add_argument('--dims', type=int, default=[500], help='Number of units in hidden layer 1.')
parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')
parser.add_argument('--upth_st', type=float, default=0.0011, help='Upper Threshold start.')
parser.add_argument('--lowth_st', type=float, default=0.1, help='Lower Threshold start.')
parser.add_argument('--upth_ed', type=float, default=0.001, help='Upper Threshold end.')
parser.add_argument('--lowth_ed', type=float, default=0.5, help='Lower Threshold end.')
parser.add_argument('--upd', type=int, default=10, help='Update epoch.')
parser.add_argument('--bs', type=int, default=10000, help='Batchsize.')
parser.add_argument('--dataset', type=str, default='wiki', help='type of dataset.')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda is True:
print('Using GPU')
torch.cuda.manual_seed(SEED)
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
def clustering(Cluster, feature, true_labels):
f_adj = np.matmul(feature, np.transpose(feature))
predict_labels = Cluster.fit_predict(f_adj)
cm = clustering_metrics(true_labels, predict_labels)
db = -metrics.davies_bouldin_score(f_adj, predict_labels)
acc, nmi, adj = cm.evaluationClusterModelFromLabel(tqdm)
return db, acc, nmi, adj
def update_similarity(z, upper_threshold, lower_treshold, pos_num, neg_num):
f_adj = np.matmul(z, np.transpose(z))
cosine = f_adj
cosine = cosine.reshape([-1,])
pos_num = round(upper_threshold * len(cosine))
neg_num = round((1-lower_treshold) * len(cosine))
pos_inds = np.argpartition(-cosine, pos_num)[:pos_num]
neg_inds = np.argpartition(cosine, neg_num)[:neg_num]
return np.array(pos_inds), np.array(neg_inds)
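# Editor's note: a minimal, self-contained sketch of the argpartition-based pair selection used
# in update_similarity() above. The thresholds are fractions of all node pairs; np.argpartition
# picks the highest- and lowest-scoring pair indices without a full sort. Defined as a helper
# only; nothing calls it during training.
def _demo_pair_selection(n_nodes=5, upper=0.2, lower=0.8):
    z = np.random.rand(n_nodes, 8)
    sims = np.matmul(z, z.T).reshape(-1)      # flattened pairwise similarity scores
    pos_num = round(upper * sims.size)        # top `upper` fraction become positive pairs
    neg_num = round((1 - lower) * sims.size)  # bottom (1 - lower) fraction become negative pairs
    pos_inds = np.argpartition(-sims, pos_num)[:pos_num]
    neg_inds = np.argpartition(sims, neg_num)[:neg_num]
    return pos_inds, neg_inds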
def update_threshold(upper_threshold, lower_treshold, up_eta, low_eta):
upth = upper_threshold + up_eta
lowth = lower_treshold + low_eta
return upth, lowth
def get_preds(emb, adj_orig, edges):
def sigmoid(x):
return 1 / (1 + np.exp(-x))
adj_rec = np.dot(emb, emb.T)
preds = []
for e in edges:
preds.append(sigmoid(adj_rec[e[0], e[1]]))
return torch.FloatTensor(preds)
def gae_for(args):
print("Using {} dataset".format(args.dataset))
dataset = PygLinkPropPredDataset(name='ogbl-ddi',
transform=T.ToDense())
data = dataset[0]
adj = data.adj.numpy()
adj = sp.csr_matrix(adj)
n = adj.shape[0]
features = np.ones((n, 1))
#split_edge = dataset.get_edge_split()
n_nodes, feat_dim = features.shape
dims = [feat_dim] + args.dims
print("Model dims", dims)
layers = args.linlayers
# Store original adjacency matrix (without diagonal entries) for later
print('adjacency shape', adj.shape)
adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
adj.eliminate_zeros()
adj_orig = adj
split_edge = dataset.get_edge_split()
val_edges = split_edge['valid']['edge']
val_edges_false = split_edge['valid']['edge_neg']
test_edges = split_edge['test']['edge']
test_edges_false = split_edge['test']['edge_neg']
train_edges = split_edge['train']['edge']
adj_train = mask_test_edges_ddi(adj, train_edges)
adj = adj_train
n = adj.shape[0]
print('feature shape', features.shape)
adj_norm_s = preprocess_graph(adj, args.gnnlayers, norm='sym', renorm=True)
sm_fea_s = sp.csr_matrix(features).toarray()
print('Laplacian Smoothing...')
for a in adj_norm_s:
sm_fea_s = a.dot(sm_fea_s)
adj_1st = (adj + sp.eye(n)).toarray()
adj_label = torch.FloatTensor(adj_1st)
model = LinTrans(layers, dims)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
sm_fea_s = torch.FloatTensor(sm_fea_s)
adj_label = adj_label.reshape([-1,])
print("sm_fea_s shape", sm_fea_s.shape)
print("adj_label shape", adj_label.shape)
if args.cuda:
model.cuda()
inx = sm_fea_s.cuda()
adj_label = adj_label.cuda()
else:
inx = sm_fea_s
pos_num = len(adj.indices)
neg_num = n_nodes*n_nodes-pos_num
print("Num Pos Samples", pos_num)
print("Num Neg Samples", neg_num)
up_eta = (args.upth_ed - args.upth_st) / (args.epochs/args.upd)
low_eta = (args.lowth_ed - args.lowth_st) / (args.epochs/args.upd)
pos_inds, neg_inds = update_similarity(normalize(sm_fea_s.numpy()), args.upth_st, args.lowth_st, pos_num, neg_num)
print("pos_inds shape", pos_inds.shape)
print("neg_inds shape", neg_inds.shape)
upth, lowth = update_threshold(args.upth_st, args.lowth_st, up_eta, low_eta)
bs = min(args.bs, len(pos_inds))
length = len(pos_inds)
if args.cuda:
pos_inds_cuda = torch.LongTensor(pos_inds).cuda()
else:
pos_inds_cuda = torch.LongTensor(pos_inds)
evaluator = Evaluator(name='ogbl-ddi')
best_lp = 0.
print("Batch Size", bs)
print('Start Training...')
for epoch in tqdm(range(args.epochs)):
st, ed = 0, bs
batch_num = 0
model.train()
length = len(pos_inds)
while ( ed <= length ):
if args.cuda:
sampled_neg = torch.LongTensor(np.random.choice(neg_inds, size=ed-st)).cuda()
else:
sampled_neg = torch.LongTensor(np.random.choice(neg_inds, size=ed-st))
print("sampled neg shape", sampled_neg.shape)
print("--------pos inds shape", pos_inds_cuda.shape)
sampled_inds = torch.cat((pos_inds_cuda[st:ed], sampled_neg), 0)
print("sampled inds shape", sampled_inds.shape)
t = time.time()
optimizer.zero_grad()
xind = sampled_inds // n_nodes
yind = sampled_inds % n_nodes
print("xind shape", xind.shape)
print("yind shape", yind.shape)
x = torch.index_select(inx, 0, xind)
y = torch.index_select(inx, 0, yind)
print("some x", x[:5])
print("some y", y[:5])
print("x shape", x.shape)
print("y shape", y.shape)
zx = model(x)
zy = model(y)
print("zx shape", zx.shape)
print("zy shape", zy.shape)
if args.cuda:
batch_label = torch.cat((torch.ones(ed-st), torch.zeros(ed-st))).cuda()
else:
batch_label = torch.cat((torch.ones(ed-st), torch.zeros(ed-st)))
batch_pred = model.dcs(zx, zy)
print("Batch label shape", batch_label.shape)
print("Batch pred shape", batch_pred.shape)
loss = loss_function(adj_preds=batch_pred, adj_labels=batch_label, n_nodes=ed-st)
loss.backward()
cur_loss = loss.item()
optimizer.step()
st = ed
batch_num += 1
if ed < length and ed + bs >= length:
ed += length - ed
else:
ed += bs
if (epoch + 1) % args.upd == 0:
model.eval()
mu = model(inx)
hidden_emb = mu.cpu().data.numpy()
upth, lowth = update_threshold(upth, lowth, up_eta, low_eta)
pos_inds, neg_inds = update_similarity(hidden_emb, upth, lowth, pos_num, neg_num)
bs = min(args.bs, len(pos_inds))
if args.cuda:
pos_inds_cuda = torch.LongTensor(pos_inds).cuda()
else:
pos_inds_cuda = torch.LongTensor(pos_inds)
val_auc, val_ap = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
if val_auc + val_ap >= best_lp:
best_lp = val_auc + val_ap
best_emb = hidden_emb
tqdm.write("Epoch: {}, train_loss_gae={:.5f}, time={:.5f}".format(
epoch + 1, cur_loss, time.time() - t))
pos_train_edge = train_edges
pos_valid_edge = val_edges
neg_valid_edge = val_edges_false
pos_test_edge = test_edges
neg_test_edge = test_edges_false
pos_train_pred = get_preds(hidden_emb, adj_orig, pos_train_edge)
pos_valid_pred = get_preds(hidden_emb, adj_orig, pos_valid_edge)
neg_valid_pred = get_preds(hidden_emb, adj_orig, neg_valid_edge)
pos_test_pred = get_preds(hidden_emb, adj_orig, pos_test_edge)
neg_test_pred = get_preds(hidden_emb, adj_orig, neg_test_edge)
results = {}
for K in [10, 20, 30]:
evaluator.K = K
train_hits = evaluator.eval({
'y_pred_pos': pos_train_pred,
'y_pred_neg': neg_valid_pred,
})[f'hits@{K}']
valid_hits = evaluator.eval({
'y_pred_pos': pos_valid_pred,
'y_pred_neg': neg_valid_pred,
})[f'hits@{K}']
test_hits = evaluator.eval({
'y_pred_pos': pos_test_pred,
'y_pred_neg': neg_test_pred,
})[f'hits@{K}']
results[f'Hits@{K}'] = (train_hits, valid_hits, test_hits)
for key, result in results.items():
train_hits, valid_hits, test_hits = result
print(key)
print(f'Epoch: {epoch:02d}, '
f'Loss: {cur_loss:.4f}, '
f'Train: {100 * train_hits:.2f}%, '
f'Valid: {100 * valid_hits:.2f}%, '
f'Test: {100 * test_hits:.2f}%')
print('---')
tqdm.write("Optimization Finished!")
auc_score, ap_score = get_roc_score(best_emb, adj_orig, test_edges, test_edges_false)
tqdm.write('Test AUC score: ' + str(auc_score))
tqdm.write('Test AP score: ' + str(ap_score))
if __name__ == '__main__':
gae_for(args)
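
# A minimal standalone sketch (added for illustration; `update_threshold` in the
# original code is assumed to simply add these deltas) of how the positive/negative
# similarity thresholds above move linearly from their start values toward their end
# values, one step every `upd` epochs.
def _threshold_schedule_sketch(upth_st, upth_ed, lowth_st, lowth_ed, epochs, upd):
    steps = epochs // upd
    up_eta = (upth_ed - upth_st) / (epochs / upd)
    low_eta = (lowth_ed - lowth_st) / (epochs / upd)
    upth, lowth = upth_st, lowth_st
    for _ in range(steps):
        upth, lowth = upth + up_eta, lowth + low_eta
    # ends at (upth_ed, lowth_ed) when epochs is a multiple of upd
    return upth, lowth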

# ==== /inheritance.py  (adilreza/basic_python_practice) ====

class parent_class:
    a = 4
    b = 3

    def parent_function(self):
        print("this is from parent function")
        return 0


class child_class(parent_class):  # inherits from parent_class
    def child_function(self):
        print("this is me from child function")

    def make_sum(self):
        return self.a + self.b


if __name__ == "__main__":
    myobj = child_class()
    print(myobj.a)
    print(myobj.parent_function())
    print(myobj.make_sum())
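
# A small follow-on sketch (not part of the original exercise): the same idea with an
# overridden initializer and a super() call, which is the usual next step after
# inheriting plain class attributes.
class parent_with_init:
    def __init__(self, a=4, b=3):
        self.a = a
        self.b = b


class child_with_init(parent_with_init):
    def __init__(self):
        super().__init__(a=10, b=20)  # reuse the parent initializer

    def make_sum(self):
        return self.a + self.b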

# ==== /com/drabarz/karolina/dominating_set.py  (Szop-Kradziej/GIS_Dominating_Set) ====

import networkx as nx
import sys
import getopt
import csv
import time
from com.drabarz.karolina.DominatingSetAlgorithm import DominatingSetAlgorithm
from com.drabarz.karolina.NetworkXAlgorithm import NetworkXAlgorithm
from com.drabarz.karolina.GreedyAlgorithm import GreedyAlgorithm
from com.drabarz.karolina.DispersedGreedyAlgorithm import DispersedGreedyAlgorithm
from com.drabarz.karolina.ClassicalSetCoverageAlgorithm import ClassicalSetCoverageAlgorithm
from com.drabarz.karolina.ModifiedGreedyAlgorithm import ModifiedGreedyAlgorithm
from com.drabarz.karolina.FastGreedyAlgorithm import FastGreedyAlgorithm
def getCommandLineArguments():
argv = sys.argv[1:]
graphFile = ''
setFile = ''
action = 'none'
try:
opts, args = getopt.getopt(argv,"hfcg:s:",["graphFile=","setFile="])
except getopt.GetoptError:
print 'test.py -g <graphFile> -s <setFile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'To find the smallest dominating set:'
print '\ttest.py -f -g <graphFile> -s <setFile>'
print 'To check if set is dominating:'
print '\ttest.py -c -g <graphFile> -s <setFile>'
sys.exit()
elif opt == '-f' :
action = "findDominatingSet"
elif opt == '-c' :
action = "checkIfSetIsDominating"
elif opt in ("-g", "--graphFile"):
graphFile = arg
elif opt in ("-s", "--setFile"):
setFile = arg
print 'Graph file is: ', graphFile
print 'Set file is: ', setFile
return [graphFile, setFile, action];
def createGraphFromFile(graphFile):
graph = nx.Graph();
try:
with open(graphFile, "rb") as inputfile:
reader = csv.reader(inputfile);
for i, line in enumerate(reader):
if i < 4: continue
edge = line[0].split('\t')
graph.add_edge(edge[0], edge[1]);
except IOError:
        print 'Incorrect graph file name'
sys.exit()
except IndexError:
print 'Incorrect input file structure'
sys.exit()
return graph;
def findAndShowDominatingSet(graph, setFile):
algorithm = chooseAlgorithm();
    printGraphParameters(graph);
start_time = time.time()
dominatingSet = algorithm.findDominatingSet(graph);
stop_time = time.time() - start_time
print "Algorithm execution time = ", stop_time
printDominatingSet(dominatingSet);
saveDominatingSet(dominatingSet, setFile);
return;
def chooseAlgorithm():
while 1 :
showMainMenu();
answer = raw_input();
if answer == '1' :
return GreedyAlgorithm();
elif answer == '2' :
return DispersedGreedyAlgorithm();
elif answer == '3' :
return ClassicalSetCoverageAlgorithm();
elif answer == '4' :
return ModifiedGreedyAlgorithm();
elif answer == '5' :
return FastGreedyAlgorithm();
elif answer == '6' :
return NetworkXAlgorithm();
sys.exc_clear();
def showMainMenu():
print "Choose algorithm to calculate the smallest dominating set: "
print "\t1) greedy algorithm"
print "\t2) dispersed greedy algorithm"
print "\t3) classical set coverage algorithm"
print "\t4) modified greedy algorithm"
print "\t5) fast greedy algorithm"
print "\t6) use algorithm implemented in NetworkX library"
return;
def printGraphParameters(graph):
print "Graph description: "
print "Number of nodes: ", nx.number_of_nodes(graph);
print "Number of edges: ", nx.number_of_edges(graph), "\n";
return;
def printDominatingSet(dominatingSet):
print "Number of nodes in dominating set: ", len(dominatingSet);
for node in dominatingSet:
print node;
return;
def saveDominatingSet(dominatingSet, setFile):
try:
with open(setFile, 'wb') as outputFile:
writer = csv.writer(outputFile);
outputFile.write("#Number of nodes in dominating set: " + str(len(dominatingSet)) + "\n");
for i in range(0, len(dominatingSet)):
outputFile.write(str(dominatingSet[i])+ '\n')
except IOError:
print 'There is no set file name selected'
return;
def checkIfSetIsDominating(graph, setFile):
inputSet = createSetFromFile(setFile);
isDominatingSet = checkIfIsDominatingSet(graph, inputSet);
print "Is set dominating: ", isDominatingSet;
return;
def createSetFromFile(setFile):
inputSet = set();
try:
with open(setFile, "rb") as inputfile:
reader = csv.reader(inputfile);
for i, line in enumerate(reader):
if i < 1: continue
node = line[0];
inputSet.add(node);
except IOError:
        print 'Incorrect set file name'
sys.exit()
except IndexError:
print 'Incorrect input file structure'
sys.exit()
return inputSet;
def checkIfIsDominatingSet(graph, dominatingSet):
return nx.is_dominating_set(graph, dominatingSet);
[graphFile, setFile, action] = getCommandLineArguments();
graph = createGraphFromFile(graphFile);
if action == "findDominatingSet" :
findAndShowDominatingSet(graph, setFile);
elif action == "checkIfSetIsDominating" :
checkIfSetIsDominating(graph, setFile);
else :
sys.exit();
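
# A quick baseline sketch (not one of the algorithm classes imported above): recent
# versions of networkx ship their own dominating-set heuristic, which is handy for
# cross-checking the custom implementations.
def networkx_baseline_dominating_set(graph):
    candidate = nx.dominating_set(graph)
    assert nx.is_dominating_set(graph, candidate)
    return candidate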

# ==== /credentials_replacer/__main__.py  (MELODYAPP/aws-credential-replacer) ====

from .replacer import main
if __name__ == '__main__':
main()

# ==== /api/openAccountAPI.py  (1105814583/P2P_python) ====

import app
class openAccountAPI:
def __init__(self):
self.open_account_url = app.BASE_URL + "/trust/trust/register"
def openAccount(self, session):
response = session.post(self.open_account_url)
return response

# ==== /all-gists/5259522/snippet.py  (gistable/gistable) ====

#!/usr/bin/env python
import sys
files = []
if len(sys.argv) > 1:
for file in sys.argv[1:]:
files.append(str(file))
else:
print "Usage: Wordcount.py file1 file2 file3 ..."
words_to_ignore = ["that","what","with","this","would","from","your","which","while","these"]
things_to_strip = [".",",","?",")","(","\"",":",";","'s"]
words_min_size = 4
print_in_html = True
text = ""
for file in files:
f = open(file,"rU")
for line in f:
text += line
words = text.lower().split()
wordcount = {}
for word in words:
for thing in things_to_strip:
if thing in word:
word = word.replace(thing,"")
if word not in words_to_ignore and len(word) >= words_min_size:
if word in wordcount:
wordcount[word] += 1
else:
wordcount[word] = 1
sortedbyfrequency = sorted(wordcount,key=wordcount.get,reverse=True)
def print_txt(sortedbyfrequency):
for word in sortedbyfrequency:
print word, wordcount[word]
def print_html(sortedbyfrequency):
print "<html><head><title>Wordcount.py Output</title></head><body><table>"
for word in sortedbyfrequency:
print "<tr><td>%s</td><td>%s</td></tr>" % (word,wordcount[word])
print "</table></body></html>"
if print_in_html == True:
print_html(sortedbyfrequency)
else:
print_txt(sortedbyfrequency)
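
# A compact sketch of the same counting pipeline using the standard library.
# Note: str.strip only trims punctuation at the ends of a word, so this is an
# approximation of the replace-based cleanup above.
def count_words_sketch(raw_text, ignore=(), min_size=4, strip_chars=".,?)(\":;'"):
    from collections import Counter
    cleaned = (w.strip(strip_chars) for w in raw_text.lower().split())
    return Counter(w for w in cleaned if len(w) >= min_size and w not in ignore)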

# ==== /advanced/class_attributes_management/comparation_with_4_methods_simple/descriptor_implement_improved.py  (tianwei1992/Python_oop_leaning) ====

class Powers():
def __init__(self, square_base, cube_base):
self._square_base = square_base
self._cube_base = cube_base
class SquareDescriptor():
def __get__(self, instance, owner):
if instance is None:
"""类.attr - > Descriptor,没毛病"""
return self
return instance._square_base ** 2
def __set__(self, instance, value):
instance._square_base = value
class CubeDescriptor():
def __get__(self, instance, owner):
if instance is None:
return self
return instance._cube_base ** 3
square = SquareDescriptor()
cube = CubeDescriptor()
X = Powers(3, 4)
"""Powers.square = 5不会触发SquareDescriptor.__get__方法,而是直接更改Powers.square为一个普通的属性,值为5,这也会影响到所以示例
所以结论:对标识符产生的属性,不要试图从类上面赋值。"""
print(Powers.square)
print()
print(X.square) # 3 ** 2 = 9
print(X.cube) # 4 ** 3 = 64
X.square = 5
print(X.square) # 5 ** 2 = 25
"""描述符定义的属性在类中定义,是类属性,但是get和set一般对实例用。
直接对类用set相当于覆盖原有属性为普通属性,偶尔对类用get,是类似Powers.square.__doc__的时候"""
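
# A follow-up sketch (not in the original file): one descriptor class that covers both
# powers and discovers its backing attribute name via __set_name__ (Python 3.6+), so the
# storage slot does not have to be hard-coded in each descriptor.
class PowerDescriptor:
    def __init__(self, exponent):
        self.exponent = exponent

    def __set_name__(self, owner, name):
        self.storage = "_" + name + "_base"

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return getattr(instance, self.storage) ** self.exponent

    def __set__(self, instance, value):
        setattr(instance, self.storage, value)


class PowersV2:
    square = PowerDescriptor(2)
    cube = PowerDescriptor(3)

    def __init__(self, square_base, cube_base):
        self.square = square_base    # goes through PowerDescriptor.__set__
        self._cube_base = cube_base  # matches the storage slot of `cube`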

# ==== /tensormorph/zzz/affix_test_old.py  (colincwilson/tensormorph) ====

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse, re, sys
from tensormorphy import environ, evaluator, phon_features
from tensormorphy.segment_embedder import SegmentEmbedder
from tensormorphy.form_embedder import FormEmbedder
from tensormorphy.dataset import DataSet
from tensormorphy.affixer import Affixer
from tensormorphy.trainer import Trainer
from affix_test_cases import import_data
import pandas as pd
import numpy as np
# parse commandline arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('--nbatch',\
help='Number of <input,output> pairs in each batch')
argparser.add_argument('--nepoch',\
help='Number of training epochs')
args, residue = argparser.parse_known_args()
# select dataset (xxx make commandline argument)
data_select = ['english_ing', 'english_ness', 'english_un',\
'english_shm', 'chamorro_um', 'hungarian_dat',\
'hebrew_paal', 'hindi_nouns', 'maltese', 'conll'][4]
data = import_data(data_select)
data_set = DataSet( data['dat'],
data['held_in_stems'],
data['held_out_stems'],
data['vowels']
)
feature_file = '~/Dropbox/TensorProductStringToStringMapping/00features/' +\
['hayes_features.csv', 'panphon_ipa_bases.csv'][0]
feature_matrix = phon_features.import_features(feature_file, data_set.segments)
symbol_params = {'feature_matrix': feature_matrix, }
role_params = {'nrole': data_set.max_len+4, }
form_embedder = FormEmbedder(symbol_params, role_params)
environ.init(form_embedder) # makes dummy morphosyn_embedder
data_set.split_and_embed(test_size=0.25)
model = Affixer()
trainer = Trainer(model)
environ.config.nepoch = 1500
trainer.train(data_set)
train_pred, test_pred =\
evaluator.evaluate(model, data_set)
sys.exit(0)
# # # # # OLD CODE # # # # #
seq_embedder, morph_embedder, train, test = import_data(data_select)
tpr.init(seq_embedder, morph_embedder)
print('filler dimensionality:', tpr.dfill)
print('role dimensionality:', tpr.drole)
print('distributed roles?', tpr.random_roles)
print('train/test split:')
print('\t', len(train), 'training examples')
print('\t', len(test), 'testing examples')
# run trainer
tpr.save_dir = '/Users/colin/Desktop/tmorph_output'
nbatch = min(40,len(train)) if args.nbatch is None else int(args.nbatch)
nepoch = 1000 if args.nepoch is None else int(args.nepoch)
trainer = trainer.Trainer( redup=False, lr=1.0e-1, dc=0.0, verbosity=1 )
affixer, decoder = trainer.train_and_test( train, test, nbatch=nbatch, max_epochs=nepoch )
if False:
tpr.trace = True
train = train.iloc[0:2].reset_index()
test = test.iloc[0:2].reset_index()
train.stem, train.output = u't r i s t i', u't r u m i s t i'
trainer.train_and_test1(train, test, nbatch=len(train))
print(tpr.traces)
for x in tpr.traces:
f = '/Users/colin/Desktop/dump/'+ x +'.txt'
y = tpr.traces[x]
print(y.__class__.__name__)
if type(y) is np.ndarray:
np.savetxt(f, y, delimiter=',')
else:
print(x, y)
if False: # test by hand
trainer.affixer.morph_attender.tau.data[:] = 5.0
trainer.affixer.posn_attender.tau.data[:] = 5.0
Stems = string2tpr(u'q a f a ts').unsqueeze(0)
Affix = string2tpr(u't i o ⋉', False).unsqueeze(0)
copy = torch.ones(tpr.nrole).unsqueeze(0)
pivot = torch.zeros(tpr.nrole).unsqueeze(0)
unpivot = torch.zeros(tpr.nrole).unsqueeze(0)
copy[0,2] = copy[0,4] = 0.0
pivot[0,0] = pivot[0,3] = 1.0
unpivot[0,1] = unpivot[0,2] = 1.0
test = {\
'affix': Affix,\
'copy': copy,\
'pivot': pivot,\
'unpivot': unpivot\
}
output, traces = trainer.affixer(Stems, 10, True, test)
stem = trainer.decoder.decode(Stems)[0]
affix = trainer.decoder.decode(Affix)[0]
stem = [x+' _' if pivot[0,i]==1.0 else x for i,x in enumerate(stem.split(' '))]
stem = [x+'/' if copy[0,i]==0.0 else x for i,x in enumerate(stem)]
affix = [x+' _' if i<25 and unpivot[0,i]==1.0 else x for i,x in enumerate(affix.split(' '))]
stem = ' '.join(stem)
affix = ' '.join(affix)
output = ' '.join(trainer.decoder.decode(output))
print('stem:', stem)
print('affix:', affix)
print(' -> ')
print('output: ', output)
for trace in traces:
print(trace, np.round(traces[trace], 2))
sys.exit(0)

# ==== /Coursera_Capstone.py  (jschuler04/Coursera_Capstone) ====

#!/usr/bin/env python
# coding: utf-8
# In[1]:
# This notebook will be mainly used for the Coursera Capstone project.
import pandas as pd
import numpy as np
print("Hello Capstone Project Course!")

# ==== /CIS_024C_Python/homeWork/Exercise9/main1.py  (Ry-Mu/cis024c_python) ====

import sys
def add(n1,n2):
return n1 + n2
number1 = int(raw_input("Enter a number:"))
number2 = int(raw_input("Enter a number:"))
print add(number1,number2)

# ==== /stats.py  (baydarich/infohash-searcher) ====

#!/usr/bin/env python
from bencode import bdecode, bencode
import os
b = 1024 * 1024
ranges = [{1: (1, 1024 * 1024)}]
t = 1
for j in range(18):
t = 1024 * 1024
ranges.append({j + 2: (t * 2 ** j + 1, t * 2 ** (j + 1))})
bs = {}
# bs = {piece_length:[{ran:count}, {ran:count}]}
base_path = "/home/horn/Documents/SNE/CCF/proj/test-torrents/"
files = os.listdir(base_path)
stat = []
r = 0
for i in files:
length = 0
with open("%s%s" % (base_path, i)) as _file:
info_orig = bdecode(_file.read())['info']
piece_length = info_orig['piece length']
try:
length = info_orig['length']
except KeyError:
for j in info_orig['files']:
length += j['length']
finally:
for j, k in enumerate(ranges):
if k[j + 1][0] <= length <= k[j + 1][1]:
r = j + 1
break
try:
bs[piece_length][r] += 1
except KeyError:
try:
bs[piece_length][r] = 1
except KeyError:
bs[piece_length] = {r: 1}
for k, v in bs.iteritems():
print k, sorted(v, reverse=True)
print bs
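
# Arithmetic sketch of the same bucketing (not used above): bucket 1 covers sizes up to
# 1 MiB, and bucket j covers (2**(j-2) MiB, 2**(j-1) MiB] for j >= 2, so the bucket index
# can be computed directly from the length instead of scanning `ranges`.
def size_bucket_sketch(length_bytes, mib=1024 * 1024):
    import math
    if length_bytes <= mib:
        return 1
    return int(math.ceil(math.log(float(length_bytes) / mib, 2))) + 1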

# ==== /Users/J/JasonSanford/great_american_beer_festival.py  (BerilBBJ/scraperwiki-scraper-vault) ====

import scraperwiki
import lxml.html
html = scraperwiki.scrape("http://www.greatamericanbeerfestival.com/at-the-festival/breweries-at-the-2012-festival")
root = lxml.html.fromstring(html)
i = 1
for tr in root.cssselect("#brewery_table tbody tr"):
tds = tr.cssselect("td")
data = {
'id' : i,
'name' : tds[0].text_content(),
'city' : tds[1].text_content(),
'state' : tds[2].text_content(),
}
scraperwiki.sqlite.save(unique_keys=['id'], data=data)
    i += 1

# ==== /First_improved ws model.py  (Isabellahu/Complex-Network) ====

# -*- coding: utf-8 -*-
"""
Created on Sat Oct 28 15:32:55 2017
@author: 90662
"""
# Build a WS (Watts-Strogatz) small-world network model
import random
from numpy import *
import networkx as nx
import matplotlib.pyplot as plt
# Analogy: as at a party, every guest only knows the single host (node 1)
def CreateNetwork(n,k,p,matrix):
i = 1
for j in range(n):
matrix[1][j] = 1
matrix[j][1] = 1
def SmallWorld(n,k,p,matrix):
    # Draw a random probability p_change; if p_change < p, rewire the edge
p_change = 0.0
edge_change = 0
for i in range(n):
#t = int(k/2)
for j in range( k // 2 + 1):
            # edge that may have to be rewired
            p_change = (random.randint(0,n-1)) / (double)(n)
            # rewire the edge
            if p_change < p:
                # pick a random node, excluding self-loops and duplicate edges
while(1):
node_NewConnect = (random.randint(0,n-1)) + 1
if matrix[i][node_NewConnect] == 0 and node_NewConnect != i:
break
if (i+j) <= (n-1):
matrix[i][i+j] = matrix[i+j][i] = 0
else:
matrix[i][i+j-(n-1)] = matrix[i+j-(n-1)][i] = 0
matrix[i][node_NewConnect] = matrix[node_NewConnect][i] = 1
edge_change += 1
else:
print("no change\n",i+j)
#test
print("small world network\n")
for i in range(n):
for j in range(n):
print(matrix[i][j])
print("\n")
print("edge_change = ",edge_change)
print("ratio = ",(double)(edge_change)/(n*k/2))
# Write the adjacency matrix to a file
def DataFile(n,k,p,matrix):
    # open the output file
f = open("C:/0network/data.txt", "w")
#matrix[[[1 for i in range(n)] [1 for j in range(n)]]
for i in range(n):
for j in range(n):
netdata = ','.join(str(matrix[i][j]))
f.write(netdata)
f.write('\n')
#f.write("true")
    # close the file
f.close()
#print(netdata)
print('end')
# Draw the network
def Drawmap(n,matrix,G):
    # add n nodes
for i in range(n):
G.add_node(i)
    # add edge (i, j) wherever matrix[i][j] == 1
for i in range(n):
for j in range(n):
if matrix[i][j] == 1:
G.add_edge(i,j)
    # define a layout; use the circular layout
pos = nx.circular_layout(G)
    # draw the graph
nx.draw(G,pos,with_labels=False,node_size = 30)
    # output option 1: save the figure as a PNG image
plt.savefig("WS-Network-change1-2.png")
    # output option 2: show the figure in a window
plt.show()
# Average clustering coefficient
def average_clustering(n,matrix):
    # connected triples
    number_three_tuple = 0.0
    # triangles
    Triangle = 0.0
    # clustering coefficient
    clustering_coefficient = 0.0
for i in range(n):
three_tuple = 0.0
sum_edge = 0
for j in range(n):
if matrix[i][j] == 1 or matrix[j][i] == 1:
sum_edge += 1
float(sum_edge)
        # number of connected triples centred on this node
        three_tuple = int((sum_edge*(sum_edge-1.0))/2.0)
        # myList holds the neighbours of node i; reset it on every iteration
        myList = []
        for j in range(i,n):
            if matrix[i][j] == 1 or matrix[j][i] == 1:
                myList.append(j)
        # if two neighbours in myList are themselves connected, they form a triangle
for k in range(len(myList)):
for q in range(k,len(myList)):
if matrix[myList[k]][myList[q]] == 1 or matrix[myList[q]][myList[k]] == 1:
Triangle += 1
if three_tuple != 0:
clustering_coefficient += (Triangle/three_tuple)
clustering_coefficient = clustering_coefficient/n
print('clustering_coefficient = ',clustering_coefficient)
# Floyd algorithm for shortest paths
def Ford(n,matrix):
    # v: source node
    # w: target node
    # k: intermediate node
    # initialize the working adjacency matrix new_m and the path matrix dis
dis = zeros((n,n),int)
new_m = zeros((n,n),int)
for v in range(n):
for w in range(n):
dis[v][w] = w
if matrix[v][w] == 0:
new_m[v][w] = 6666666
elif matrix[v][w] == 1:
new_m[v][w] = 1
dis[v][w] = 1
for k in range(n):
for v in range(n):
for w in range(n):
                # if the path through the intermediate node is shorter than the direct one
if (new_m[v][k] + new_m[k][w]) < new_m[v][w]:
new_m[v][w] = new_m[v][k] + new_m[k][w]
#dis[v][w] = dis[v][k]
dis[v][w] = 2
    # sum the path lengths over all node pairs
sum = 0.0
for v in range(n):
for w in range(v+1,n):
#print('v= ,',v,'w = ',w)
#print('dis[v][w] = ',dis[v][w])
sum = sum + dis[v][w]
float(n)
average_shortest_path_length = sum/(n*(n-1.0)/2)
print('average_shortest_path_length = ',average_shortest_path_length)
# Node degree distribution
def node_degree_distribution(n,matrix):
    # compute the degree of every node
degree = []
for i in range(n):
sum = 0
for j in range(n):
sum += matrix[i][j]
#print(sum)
degree.append(sum)
#print(degree)
degree.sort()
print('degree = ',degree)
sum_degree= 0.0
for i in range(n):
sum_degree += degree[i]
#print(sum_degree)
    # x-axis sequence, from 1 up to the maximum degree
    x = range(len(degree))
    # convert counts to frequencies with a list comprehension
    y = [z/sum_degree for z in degree]
    # plot the degree distribution on log-log axes
    plt.loglog(x,y,color="blue",linewidth=2)
    # show the chart
plt.show()
# Dynamic behaviour
# robustness against intentional attack
def node_robustness(n):
    #node_degree_distribution(n,matrix)
    # find the node with the largest degree
degree = []
for i in range(n):
sum = 0
for j in range(n):
sum += matrix[i][j]
degree.append(sum)
    # remove all edges of the highest-degree node
node_flag = degree.index(max(degree))
for i in range(n):
matrix[node_flag][i] = 0
matrix[i][node_flag] = 0
# random attack
def node_random(n):
    # draw a random node index between 0 and n-1
node_flag = random.randint(0,n-1)
print(node_flag)
for i in range(n):
matrix[node_flag][i] = 0
matrix[i][node_flag] = 0
if __name__=="__main__":
print("main")
    # read the three parameters: node count N, parameter K, probability P
    n = input("Enter the number of nodes n = ",)
    k = input("Enter the parameter k (even) = ",)
    p = input("Enter the rewiring probability p = ",)
n=int(n)
k=int(k)
p=float(p)
matrix = zeros((n,n),int)
#matrix = zeros((n,n))
#print(matrix)
G = nx.Graph()
value = [n,k,p]
#print("\n")
CreateNetwork(n,k,p,matrix)
SmallWorld(n,k,p,matrix)
#print(matrix)
    # export the matrix to a file
    #DataFile(n,k,p,matrix)
    # draw the network
    Drawmap(n,matrix,G)
    # network properties before the attack
    # clustering coefficient
average_clustering(n,matrix)
    # average shortest path length
Ford(n,matrix)
    # node degree distribution
node_degree_distribution(n,matrix)
    # robustness against intentional attack
    # re-create the graph
#node_robustness(n)
#G = nx.Graph()
#Drawmap(n,matrix,G)
    # random attack
node_random(n)
    # re-create the graph
G = nx.Graph()
Drawmap(n,matrix,G)
    # network properties after the attack
    # clustering coefficient
average_clustering(n,matrix)
    # average shortest path length
Ford(n,matrix)
    # node degree distribution
node_degree_distribution(n,matrix)
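
# Cross-check sketch (separate from the hand-rolled routines above): networkx already
# provides the same model and the two metrics computed here, which makes a convenient
# reference implementation.
def networkx_reference_sketch(n=100, k=4, p=0.1):
    g = nx.watts_strogatz_graph(n, k, p)
    clustering = nx.average_clustering(g)
    # average_shortest_path_length assumes the graph is connected
    path_length = nx.average_shortest_path_length(g)
    return clustering, path_length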

# ==== /ml3d/tf/models/point_rcnn.py  (kukuruza/Open3D-ML) ====

import tensorflow as tf
import numpy as np
import os
import pickle
from .base_model_objdet import BaseModel
from ..modules.losses.smooth_L1 import SmoothL1Loss
from ..modules.losses.focal_loss import FocalLoss
from ..modules.losses.cross_entropy import CrossEntropyLoss
from ..modules.pointnet import Pointnet2MSG, PointnetSAModule
from ..utils.objdet_helper import xywhr_to_xyxyr
from open3d.ml.tf.ops import nms
from ..utils.tf_utils import gen_CNN
from ...datasets.utils import BEVBox3D, DataProcessing, ObjdetAugmentation
from ...datasets.utils.operations import filter_by_min_points, points_in_box
from ...utils import MODEL
from ..modules.schedulers import OneCycleScheduler
from ..utils.roipool3d import roipool3d_utils
from ...metrics import iou_3d
class PointRCNN(BaseModel):
"""Object detection model. Based on the PoinRCNN architecture
https://github.com/sshaoshuai/PointRCNN.
The network is not trainable end-to-end, it requires pre-training of the RPN
module, followed by training of the RCNN module. For this the mode must be
set to 'RPN', with this, the network only outputs intermediate results. If
the RPN module is trained, the mode can be set to 'RCNN' (default), with
this, the second module can be trained and the output are the final
predictions.
For inference use the 'RCNN' mode.
Args:
name (string): Name of model.
Default to "PointRCNN".
device (string): 'cuda' or 'cpu'.
Default to 'cuda'.
classes (string[]): List of classes used for object detection:
Default to ['Car'].
        score_thres (float): Min confidence score for prediction.
Default to 0.3.
npoints (int): Number of processed input points.
Default to 16384.
rpn (dict): Config of RPN module.
Default to {}.
rcnn (dict): Config of RCNN module.
Default to {}.
mode (string): Execution mode, 'RPN' or 'RCNN'.
Default to 'RCNN'.
"""
def __init__(self,
name="PointRCNN",
classes=['Car'],
score_thres=0.3,
npoints=16384,
rpn={},
rcnn={},
mode="RCNN",
**kwargs):
super().__init__(name=name, **kwargs)
assert mode == "RPN" or mode == "RCNN"
self.mode = mode
self.npoints = npoints
self.classes = classes
self.name2lbl = {n: i for i, n in enumerate(classes)}
self.lbl2name = {i: n for i, n in enumerate(classes)}
self.score_thres = score_thres
self.rpn = RPN(**rpn)
self.rcnn = RCNN(num_classes=len(self.classes), **rcnn)
if self.mode == "RCNN":
self.rpn.trainable = False
else:
self.rcnn.trainable = False
def call(self, inputs, training=True):
cls_score, reg_score, backbone_xyz, backbone_features = self.rpn(
inputs[0], training=self.mode == "RPN" and training)
if self.mode != "RPN":
cls_score = tf.stop_gradient(cls_score)
reg_score = tf.stop_gradient(reg_score)
backbone_xyz = tf.stop_gradient(backbone_xyz)
backbone_features = tf.stop_gradient(backbone_features)
rpn_scores_raw = tf.stop_gradient(cls_score[:, :, 0])
rois, _ = self.rpn.proposal_layer(rpn_scores_raw,
reg_score,
backbone_xyz,
training=training) # (B, M, 7)
rois = tf.stop_gradient(rois)
output = {"rois": rois, "cls": cls_score, "reg": reg_score}
if self.mode == "RCNN":
rpn_scores_norm = tf.sigmoid(rpn_scores_raw)
seg_mask = tf.cast((rpn_scores_norm > self.score_thres), tf.float32)
pts_depth = tf.norm(backbone_xyz, ord=2, axis=2)
seg_mask = tf.stop_gradient(seg_mask)
pts_depth = tf.stop_gradient(pts_depth)
gt_boxes = None
if training or self.mode == "RPN":
gt_boxes = inputs[1]
output = self.rcnn(rois,
gt_boxes,
backbone_xyz,
tf.transpose(backbone_features, (0, 2, 1)),
seg_mask,
pts_depth,
training=training)
return output
def get_optimizer(self, cfg):
beta1, beta2 = cfg.get('betas', [0.9, 0.99])
lr_scheduler = OneCycleScheduler(40800, cfg.lr, cfg.div_factor)
optimizer = tf.optimizers.Adam(learning_rate=lr_scheduler,
beta_1=beta1,
beta_2=beta2)
return optimizer
def load_gt_database(self, pickle_path, min_points_dict, sample_dict):
"""Load ground truth object database.
Args:
pickle_path: Path of pickle file generated using `scripts/collect_bbox.py`.
min_points_dict: A dictionary to filter objects based on number of points inside.
sample_dict: A dictionary to decide number of objects to sample.
"""
db_boxes = pickle.load(open(pickle_path, 'rb'))
if min_points_dict is not None:
db_boxes = filter_by_min_points(db_boxes, min_points_dict)
db_boxes_dict = {}
for key in sample_dict.keys():
db_boxes_dict[key] = []
for db_box in db_boxes:
if db_box.label_class in sample_dict.keys():
db_boxes_dict[db_box.label_class].append(db_box)
self.db_boxes_dict = db_boxes_dict
def augment_data(self, data, attr):
"""Augment object detection data.
Available augmentations are:
`ObjectSample`: Insert objects from ground truth database.
`ObjectRangeFilter`: Filter pointcloud from given bounds.
`PointShuffle`: Shuffle the pointcloud.
Args:
data: A dictionary object returned from the dataset class.
attr: Attributes for current pointcloud.
Returns:
Augmented `data` dictionary.
"""
cfg = self.cfg.augment
if 'ObjectSample' in cfg.keys():
if not hasattr(self, 'db_boxes_dict'):
data_path = attr['path']
# remove tail of path to get root data path
for _ in range(3):
data_path = os.path.split(data_path)[0]
pickle_path = os.path.join(data_path, 'bboxes.pkl')
self.load_gt_database(pickle_path, **cfg['ObjectSample'])
data = ObjdetAugmentation.ObjectSample(
data,
db_boxes_dict=self.db_boxes_dict,
sample_dict=cfg['ObjectSample']['sample_dict'])
if cfg.get('ObjectRangeFilter', False):
data = ObjdetAugmentation.ObjectRangeFilter(
data, self.cfg.point_cloud_range)
if cfg.get('PointShuffle', False):
data = ObjdetAugmentation.PointShuffle(data)
return data
def loss(self, results, inputs, training=True):
if self.mode == "RPN":
return self.rpn.loss(results, inputs)
else:
if not training:
return {"loss": tf.constant(0.0)}
return self.rcnn.loss(results, inputs)
def filter_objects(self, bbox_objs):
"""Filter objects based on classes to train.
Args:
bbox_objs: Bounding box objects from dataset class.
Returns:
Filtered bounding box objects.
"""
filtered = []
for bb in bbox_objs:
if bb.label_class in self.classes:
filtered.append(bb)
return filtered
def preprocess(self, data, attr):
if attr['split'] in ['train', 'training']:
data = self.augment_data(data, attr)
data['bounding_boxes'] = self.filter_objects(data['bounding_boxes'])
# remove intensity
points = np.array(data['point'][..., :3], dtype=np.float32)
calib = data['calib']
# transform in cam space
points = DataProcessing.world2cam(points, calib['world_cam'])
new_data = {'point': points, 'calib': calib}
# bounding_boxes are objects of type BEVBox3D. It is renamed to
# bbox_objs to clarify them as objects and not matrix of type [N, 7].
if attr['split'] not in ['test', 'testing']:
new_data['bbox_objs'] = data['bounding_boxes']
return new_data
@staticmethod
def generate_rpn_training_labels(points, bboxes, bboxes_world, calib=None):
"""Generates labels for RPN network.
Classifies each point as foreground/background based on points inside bbox.
        We don't train on ambiguous points which are just outside bounding boxes (calculated
by `extended_boxes`).
Also computes regression labels for bounding box proposals(in bounding box frame).
Args:
points: Input pointcloud.
bboxes: bounding boxes in camera frame.
bboxes_world: bounding boxes in world frame.
calib: Calibration file for cam_to_world matrix.
Returns:
Classification and Regression labels.
"""
cls_label = np.zeros((points.shape[0]), dtype=np.int32)
reg_label = np.zeros((points.shape[0], 7),
dtype=np.float32) # dx, dy, dz, ry, h, w, l
if len(bboxes) == 0:
return cls_label, reg_label
pts_idx = points_in_box(points.copy(),
bboxes_world,
camera_frame=True,
cam_world=DataProcessing.invT(
calib['world_cam']))
# enlarge the bbox3d, ignore nearby points
extended_boxes = bboxes_world.copy()
# Enlarge box by 0.4m (from PointRCNN paper).
        extended_boxes[:, 3:6] += 0.4
# Decrease z coordinate, as z_center is at bottom face of box.
extended_boxes[:, 2] -= 0.2
pts_idx_ext = points_in_box(points.copy(),
extended_boxes,
camera_frame=True,
cam_world=DataProcessing.invT(
calib['world_cam']))
for k in range(bboxes.shape[0]):
fg_pt_flag = pts_idx[:, k]
fg_pts_rect = points[fg_pt_flag]
cls_label[fg_pt_flag] = 1
fg_enlarge_flag = pts_idx_ext[:, k]
ignore_flag = np.logical_xor(fg_pt_flag, fg_enlarge_flag)
cls_label[ignore_flag] = -1
# pixel offset of object center
center3d = bboxes[k][0:3].copy() # (x, y, z)
center3d[1] -= bboxes[k][3] / 2
reg_label[fg_pt_flag, 0:3] = center3d - fg_pts_rect
# size and angle encoding
reg_label[fg_pt_flag, 3] = bboxes[k][3] # h
reg_label[fg_pt_flag, 4] = bboxes[k][4] # w
reg_label[fg_pt_flag, 5] = bboxes[k][5] # l
reg_label[fg_pt_flag, 6] = bboxes[k][6] # ry
return cls_label, reg_label
def transform(self, data, attr):
points = data['point']
if attr['split'] not in ['test', 'testing']: #, 'val', 'validation']:
if self.npoints < len(points):
pts_depth = points[:, 2]
pts_near_flag = pts_depth < 40.0
far_idxs_choice = np.where(pts_near_flag == 0)[0]
near_idxs = np.where(pts_near_flag == 1)[0]
near_idxs_choice = np.random.choice(near_idxs,
self.npoints -
len(far_idxs_choice),
replace=False)
choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
if len(far_idxs_choice) > 0 else near_idxs_choice
np.random.shuffle(choice)
else:
choice = np.arange(0, len(points), dtype=np.int32)
if self.npoints > len(points):
extra_choice = np.random.choice(choice,
self.npoints - len(points),
replace=False)
choice = np.concatenate((choice, extra_choice), axis=0)
np.random.shuffle(choice)
points = points[choice, :]
t_data = {'point': points, 'calib': data['calib']}
if attr['split'] not in ['test', 'testing']:
labels = []
bboxes = []
bboxes_world = []
if len(data['bbox_objs']) != 0:
labels = np.stack([
self.name2lbl.get(bb.label_class, len(self.classes))
for bb in data['bbox_objs']
])
bboxes = np.stack([bb.to_camera() for bb in data['bbox_objs']
]) # Camera frame.
bboxes_world = np.stack(
[bb.to_xyzwhlr() for bb in data['bbox_objs']])
if self.mode == "RPN":
labels, bboxes = PointRCNN.generate_rpn_training_labels(
points, bboxes, bboxes_world, data['calib'])
t_data['labels'] = np.array(labels)
t_data['bbox_objs'] = data['bbox_objs'] # Objects of type BEVBox3D.
if attr['split'] in ['train', 'training'] or self.mode == "RPN":
t_data['bboxes'] = bboxes
return t_data
def inference_end(self, results, inputs):
if self.mode == 'RPN':
return [[]]
roi_boxes3d = results['rois'] # (B, M, 7)
batch_size = roi_boxes3d.shape[0]
rcnn_cls = tf.reshape(results['cls'],
(batch_size, -1, results['cls'].shape[1]))
rcnn_reg = tf.reshape(results['reg'],
(batch_size, -1, results['reg'].shape[1]))
pred_boxes3d, rcnn_cls = self.rcnn.proposal_layer(rcnn_cls,
rcnn_reg,
roi_boxes3d,
training=False)
inference_result = []
for calib, bboxes, scores in zip(inputs[3], pred_boxes3d, rcnn_cls):
# scoring
if scores.shape[-1] == 1:
scores = tf.sigmoid(scores)
labels = tf.cast(scores < self.score_thres, tf.int64)
else:
labels = tf.argmax(scores)
scores = tf.nn.softmax(scores, axis=0)
scores = scores[labels]
fltr = tf.reshape(scores > self.score_thres, (-1))
bboxes = bboxes[fltr]
labels = labels[fltr]
scores = scores[fltr]
bboxes = bboxes.numpy()
scores = scores.numpy()
labels = labels.numpy()
inference_result.append([])
world_cam, cam_img = calib.numpy()
for bbox, score, label in zip(bboxes, scores, labels):
pos = bbox[:3]
dim = bbox[[4, 3, 5]]
# transform into world space
pos = DataProcessing.cam2world(pos.reshape((1, -1)),
world_cam).flatten()
pos = pos + [0, 0, dim[1] / 2]
yaw = bbox[-1]
name = self.lbl2name.get(label[0], "ignore")
inference_result[-1].append(
BEVBox3D(pos, dim, yaw, name, score, world_cam, cam_img))
return inference_result
def get_batch_gen(self, dataset, steps_per_epoch=None, batch_size=1):
def batcher():
count = len(dataset) if steps_per_epoch is None else steps_per_epoch
for i in np.arange(0, count, batch_size):
batch = [dataset[i + bi]['data'] for bi in range(batch_size)]
points = tf.stack([b['point'] for b in batch], axis=0)
bboxes = [
b.get('bboxes', tf.zeros((0, 7), dtype=tf.float32))
for b in batch
]
max_gt = 0
for bbox in bboxes:
max_gt = max(max_gt, bbox.shape[0])
pad_bboxes = np.zeros((len(bboxes), max_gt, 7),
dtype=np.float32)
for j in range(len(bboxes)):
pad_bboxes[j, :bboxes[j].shape[0], :] = bboxes[j]
bboxes = tf.constant(pad_bboxes)
labels = [
b.get('labels', tf.zeros((0,), dtype=tf.int32))
for b in batch
]
max_lab = 0
for lab in labels:
max_lab = max(max_lab, lab.shape[0])
if 'labels' in batch[
0] and labels[0].shape[0] != points.shape[1]:
pad_labels = np.ones(
(len(labels), max_lab), dtype=np.int32) * (-1)
for j in range(len(labels)):
pad_labels[j, :labels[j].shape[0]] = labels[j]
labels = tf.constant(pad_labels)
else:
labels = tf.stack(labels, axis=0)
calib = [
tf.constant([
b.get('calib', {}).get('world_cam', np.eye(4)),
b.get('calib', {}).get('cam_img', np.eye(4))
]) for b in batch
]
yield (points, bboxes, labels, calib)
gen_func = batcher
gen_types = (tf.float32, tf.float32, tf.int32, tf.float32)
gen_shapes = ([batch_size, None, 3], [batch_size, None,
7], [batch_size,
None], [batch_size, 2, 4, 4])
return gen_func, gen_types, gen_shapes
MODEL._register_module(PointRCNN, 'tf')
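
# Illustrative sketch of the two-stage schedule described in the PointRCNN docstring.
# The empty configs are placeholders (in practice they come from the model yaml), and
# this helper is not called anywhere in this module.
def _point_rcnn_two_stage_sketch(rpn_cfg=None, rcnn_cfg=None):
    rpn_cfg, rcnn_cfg = rpn_cfg or {}, rcnn_cfg or {}
    stage1 = PointRCNN(mode="RPN", rpn=rpn_cfg, rcnn=rcnn_cfg)   # only the RPN is trainable
    stage2 = PointRCNN(mode="RCNN", rpn=rpn_cfg, rcnn=rcnn_cfg)  # RPN frozen, RCNN trained
    return stage1, stage2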
def get_reg_loss(pred_reg,
reg_label,
loc_scope,
loc_bin_size,
num_head_bin,
anchor_size,
get_xz_fine=True,
get_y_by_bin=False,
loc_y_scope=0.5,
loc_y_bin_size=0.25,
get_ry_fine=False):
"""Bin-based 3D bounding boxes regression loss. See
https://arxiv.org/abs/1812.04244 for more details.
Args:
pred_reg: (N, C)
reg_label: (N, 7) [dx, dy, dz, h, w, l, ry]
loc_scope: Constant
loc_bin_size: Constant
num_head_bin: Constant
anchor_size: (N, 3) or (3)
get_xz_fine: Whether to get fine xz loss.
get_y_by_bin: Whether to divide y coordinate into bin.
loc_y_scope: Scope length for y coordinate.
loc_y_bin_size: Bin size for classifying y coordinate.
get_ry_fine: Whether to use fine yaw loss.
"""
per_loc_bin_num = int(loc_scope / loc_bin_size) * 2
loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2
reg_loss_dict = {}
loc_loss = 0
# xz localization loss
    x_offset_label, y_offset_label, z_offset_label = reg_label[:, 0], reg_label[:, 1], reg_label[:, 2]
x_shift = tf.clip_by_value(x_offset_label + loc_scope, 0,
loc_scope * 2 - 1e-3)
z_shift = tf.clip_by_value(z_offset_label + loc_scope, 0,
loc_scope * 2 - 1e-3)
x_bin_label = tf.cast(tf.floor(x_shift / loc_bin_size), tf.int64)
z_bin_label = tf.cast(tf.floor(z_shift / loc_bin_size), tf.int64)
x_bin_l, x_bin_r = 0, per_loc_bin_num
z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2
start_offset = z_bin_r
loss_x_bin = CrossEntropyLoss()(pred_reg[:, x_bin_l:x_bin_r], x_bin_label)
loss_z_bin = CrossEntropyLoss()(pred_reg[:, z_bin_l:z_bin_r], z_bin_label)
reg_loss_dict['loss_x_bin'] = loss_x_bin.numpy()
reg_loss_dict['loss_z_bin'] = loss_z_bin.numpy()
loc_loss += loss_x_bin + loss_z_bin
if get_xz_fine:
x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3
z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4
start_offset = z_res_r
x_res_label = x_shift - (
tf.cast(x_bin_label, tf.float32) * loc_bin_size + loc_bin_size / 2)
z_res_label = z_shift - (
tf.cast(z_bin_label, tf.float32) * loc_bin_size + loc_bin_size / 2)
x_res_norm_label = x_res_label / loc_bin_size
z_res_norm_label = z_res_label / loc_bin_size
x_bin_onehot = tf.one_hot(x_bin_label, per_loc_bin_num)
z_bin_onehot = tf.one_hot(z_bin_label, per_loc_bin_num)
loss_x_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, x_res_l:x_res_r] *
x_bin_onehot,
axis=1), x_res_norm_label)
loss_z_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, z_res_l:z_res_r] *
z_bin_onehot,
axis=1), z_res_norm_label)
reg_loss_dict['loss_x_res'] = loss_x_res.numpy()
reg_loss_dict['loss_z_res'] = loss_z_res.numpy()
loc_loss += loss_x_res + loss_z_res
# y localization loss
if get_y_by_bin:
y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num
y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num
start_offset = y_res_r
y_shift = tf.clip_by_value(y_offset_label + loc_y_scope, 0,
loc_y_scope * 2 - 1e-3)
y_bin_label = tf.cast(tf.floor(y_shift / loc_y_bin_size), tf.int64)
y_res_label = y_shift - (tf.cast(y_bin_label, tf.float32) *
loc_y_bin_size + loc_y_bin_size / 2)
y_res_norm_label = y_res_label / loc_y_bin_size
y_bin_onehot = tf.one_hot(y_bin_label, loc_y_bin_num)
loss_y_bin = CrossEntropyLoss()(pred_reg[:, y_bin_l:y_bin_r],
y_bin_label)
loss_y_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, y_res_l:y_res_r] *
y_bin_onehot,
axis=1), y_res_norm_label)
reg_loss_dict['loss_y_bin'] = loss_y_bin.numpy()
reg_loss_dict['loss_y_res'] = loss_y_res.numpy()
loc_loss += loss_y_bin + loss_y_res
else:
y_offset_l, y_offset_r = start_offset, start_offset + 1
start_offset = y_offset_r
loss_y_offset = SmoothL1Loss()(tf.reduce_sum(
pred_reg[:, y_offset_l:y_offset_r], axis=1), y_offset_label)
reg_loss_dict['loss_y_offset'] = loss_y_offset.numpy()
loc_loss += loss_y_offset
# angle loss
ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
ry_label = reg_label[:, 6]
if get_ry_fine:
# divide pi/2 into several bins
angle_per_class = (np.pi / 2) / num_head_bin
ry_label = ry_label % (2 * np.pi) # 0 ~ 2pi
ry_label = tf.where((ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5),
(ry_label + np.pi) % (2 * np.pi),
ry_label) # (0 ~ pi/2, 3pi/2 ~ 2pi)
shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi) # (0 ~ pi)
shift_angle = tf.clip_by_value(shift_angle - np.pi * 0.25, 1e-3,
np.pi * 0.5 - 1e-3) # (0, pi/2)
# bin center is (5, 10, 15, ..., 85)
ry_bin_label = tf.cast(tf.floor(shift_angle / angle_per_class),
tf.int64)
ry_res_label = shift_angle - (tf.cast(ry_bin_label, tf.float32) *
angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
else:
# divide 2pi into several bins
angle_per_class = (2 * np.pi) / num_head_bin
heading_angle = ry_label % (2 * np.pi) # 0 ~ 2pi
shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)
ry_bin_label = tf.cast(tf.floor(shift_angle / angle_per_class),
tf.int64)
ry_res_label = shift_angle - (tf.cast(ry_bin_label, tf.float32) *
angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
ry_bin_onehot = tf.one_hot(ry_bin_label, num_head_bin)
loss_ry_bin = CrossEntropyLoss()(pred_reg[:, ry_bin_l:ry_bin_r],
ry_bin_label)
loss_ry_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, ry_res_l:ry_res_r] *
ry_bin_onehot,
axis=1), ry_res_norm_label)
reg_loss_dict['loss_ry_bin'] = loss_ry_bin.numpy()
reg_loss_dict['loss_ry_res'] = loss_ry_res.numpy()
angle_loss = loss_ry_bin + loss_ry_res
# size loss
size_res_l, size_res_r = ry_res_r, ry_res_r + 3
assert pred_reg.shape[1] == size_res_r, '%d vs %d' % (pred_reg.shape[1],
size_res_r)
size_res_norm_label = (reg_label[:, 3:6] - anchor_size) / anchor_size
size_res_norm = pred_reg[:, size_res_l:size_res_r]
size_loss = SmoothL1Loss()(size_res_norm, size_res_norm_label)
# Total regression loss
reg_loss_dict['loss_loc'] = loc_loss
reg_loss_dict['loss_angle'] = angle_loss
reg_loss_dict['loss_size'] = size_loss
return loc_loss, angle_loss, size_loss, reg_loss_dict
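
# Standalone sketch (mirrors the x/z branch of get_reg_loss above, illustration only):
# a continuous offset in [-loc_scope, loc_scope) is shifted to be non-negative, assigned
# to a bin of width loc_bin_size, and the leftover residual is normalized by the bin size.
def _encode_offset_sketch(offset, loc_scope=3.0, loc_bin_size=0.5):
    shift = min(max(offset + loc_scope, 0.0), loc_scope * 2 - 1e-3)
    bin_idx = int(shift // loc_bin_size)
    residual = shift - (bin_idx * loc_bin_size + loc_bin_size / 2)
    return bin_idx, residual / loc_bin_size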
class RPN(tf.keras.layers.Layer):
def __init__(self,
backbone={},
cls_in_ch=128,
cls_out_ch=[128],
reg_in_ch=128,
reg_out_ch=[128],
db_ratio=0.5,
head={},
focal_loss={},
loss_weight=[1.0, 1.0],
**kwargs):
super().__init__()
# backbone
self.backbone = Pointnet2MSG(**backbone)
self.proposal_layer = ProposalLayer(**head)
# classification branch
layers = []
for i in range(len(cls_out_ch)):
layers.extend([
tf.keras.layers.Conv1D(cls_out_ch[i],
1,
use_bias=False,
data_format="channels_first"),
tf.keras.layers.BatchNormalization(axis=1,
momentum=0.9,
epsilon=1e-05),
tf.keras.layers.ReLU(),
tf.keras.layers.Dropout(db_ratio)
])
layers.append(
tf.keras.layers.Conv1D(
1,
1,
use_bias=True,
bias_initializer=tf.keras.initializers.Constant(-np.log(
(1 - 0.01) / 0.01)),
data_format="channels_first"))
self.cls_blocks = tf.keras.Sequential(layers)
# regression branch
per_loc_bin_num = int(self.proposal_layer.loc_scope /
self.proposal_layer.loc_bin_size) * 2
if self.proposal_layer.loc_xz_fine:
reg_channel = per_loc_bin_num * 4 + self.proposal_layer.num_head_bin * 2 + 3
else:
reg_channel = per_loc_bin_num * 2 + self.proposal_layer.num_head_bin * 2 + 3
reg_channel = reg_channel + 1 # reg y
layers = []
for i in range(len(reg_out_ch)):
layers.extend([
tf.keras.layers.Conv1D(reg_out_ch[i],
1,
use_bias=False,
data_format="channels_first"),
tf.keras.layers.BatchNormalization(axis=1,
momentum=0.9,
epsilon=1e-05),
tf.keras.layers.ReLU(),
tf.keras.layers.Dropout(db_ratio)
])
layers.append(
tf.keras.layers.Conv1D(
reg_channel,
1,
use_bias=True,
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.001),
data_format="channels_first"))
self.reg_blocks = tf.keras.Sequential(layers)
self.loss_cls = FocalLoss(**focal_loss)
self.loss_weight = loss_weight
def call(self, x, training=True):
backbone_xyz, backbone_features = self.backbone(
x, training=training) # (B, N, 3), (B, C, N)
rpn_cls = tf.transpose(
self.cls_blocks(backbone_features, training=training),
(0, 2, 1)) # (B, N, 1)
rpn_reg = tf.transpose(
self.reg_blocks(backbone_features, training=training),
(0, 2, 1)) # (B, N, C)
return rpn_cls, rpn_reg, backbone_xyz, backbone_features
def loss(self, results, inputs):
rpn_cls = results['cls']
rpn_reg = results['reg']
rpn_reg_label = inputs[1]
rpn_cls_label = inputs[2]
rpn_cls_label_flat = tf.reshape(rpn_cls_label, (-1))
rpn_cls_flat = tf.reshape(rpn_cls, (-1))
fg_mask = (rpn_cls_label_flat > 0)
# focal loss
rpn_cls_target = tf.cast((rpn_cls_label_flat > 0), tf.int32)
pos = tf.cast((rpn_cls_label_flat > 0), tf.float32)
neg = tf.cast((rpn_cls_label_flat == 0), tf.float32)
cls_weights = pos + neg
pos_normalizer = tf.reduce_sum(pos)
cls_weights = cls_weights / tf.maximum(pos_normalizer, 1.0)
rpn_loss_cls = self.loss_cls(rpn_cls_flat,
rpn_cls_target,
cls_weights,
avg_factor=1.0)
# RPN regression loss
point_num = rpn_reg.shape[0] * rpn_reg.shape[1]
fg_sum = tf.reduce_sum(tf.cast(fg_mask, tf.int64)).numpy()
if fg_sum != 0:
loss_loc, loss_angle, loss_size, reg_loss_dict = \
get_reg_loss(tf.reshape(rpn_reg, (point_num, -1))[fg_mask],
tf.reshape(rpn_reg_label, (point_num, 7))[fg_mask],
loc_scope=self.proposal_layer.loc_scope,
loc_bin_size=self.proposal_layer.loc_bin_size,
num_head_bin=self.proposal_layer.num_head_bin,
anchor_size=self.proposal_layer.mean_size,
get_xz_fine=self.proposal_layer.loc_xz_fine,
get_y_by_bin=False,
get_ry_fine=False)
loss_size = 3 * loss_size
rpn_loss_reg = loss_loc + loss_angle + loss_size
else:
rpn_loss_reg = tf.reduce_mean(rpn_reg * 0)
return {
"cls": rpn_loss_cls * self.loss_weight[0],
"reg": rpn_loss_reg * self.loss_weight[1]
}
class RCNN(tf.keras.layers.Layer):
def __init__(
self,
num_classes,
in_channels=128,
SA_config={
"npoints": [128, 32, -1],
"radius": [0.2, 0.4, 100],
"nsample": [64, 64, 64],
"mlps": [[128, 128, 128], [128, 128, 256], [256, 256, 512]]
},
cls_out_ch=[256, 256],
reg_out_ch=[256, 256],
db_ratio=0.5,
use_xyz=True,
xyz_up_layer=[128, 128],
head={},
target_head={},
loss={}):
super().__init__()
self.rcnn_input_channel = 5
self.pool_extra_width = target_head.get("pool_extra_width", 1.0)
self.num_points = target_head.get("num_points", 512)
self.proposal_layer = ProposalLayer(**head)
self.SA_modules = []
for i in range(len(SA_config["npoints"])):
mlps = [in_channels] + SA_config["mlps"][i]
npoint = SA_config["npoints"][
i] if SA_config["npoints"][i] != -1 else None
self.SA_modules.append(
PointnetSAModule(npoint=npoint,
radius=SA_config["radius"][i],
nsample=SA_config["nsample"][i],
mlp=mlps,
use_xyz=use_xyz,
use_bias=True))
in_channels = mlps[-1]
self.xyz_up_layer = gen_CNN([self.rcnn_input_channel] + xyz_up_layer,
conv=tf.keras.layers.Conv2D)
c_out = xyz_up_layer[-1]
self.merge_down_layer = gen_CNN([c_out * 2, c_out],
conv=tf.keras.layers.Conv2D)
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
layers = []
for i in range(len(cls_out_ch)):
layers.extend([
tf.keras.layers.Conv1D(
cls_out_ch[i],
1,
use_bias=True,
data_format="channels_first",
kernel_initializer=tf.keras.initializers.GlorotNormal(),
bias_initializer=tf.keras.initializers.Constant(0.0)),
tf.keras.layers.ReLU()
])
layers.append(
tf.keras.layers.Conv1D(
cls_channel,
1,
use_bias=True,
data_format="channels_first",
kernel_initializer=tf.keras.initializers.GlorotNormal(),
bias_initializer=tf.keras.initializers.Constant(0.0)))
self.cls_blocks = tf.keras.Sequential(layers)
self.loss_cls = tf.keras.losses.BinaryCrossentropy()
# regression branch
per_loc_bin_num = int(self.proposal_layer.loc_scope /
self.proposal_layer.loc_bin_size) * 2
loc_y_bin_num = int(self.proposal_layer.loc_y_scope /
self.proposal_layer.loc_y_bin_size) * 2
reg_channel = per_loc_bin_num * 4 + self.proposal_layer.num_head_bin * 2 + 3
reg_channel += (1 if not self.proposal_layer.get_y_by_bin else
loc_y_bin_num * 2)
layers = []
for i in range(len(reg_out_ch)):
layers.extend([
tf.keras.layers.Conv1D(
reg_out_ch[i],
1,
use_bias=True,
data_format="channels_first",
kernel_initializer=tf.keras.initializers.GlorotNormal(),
bias_initializer=tf.keras.initializers.Constant(0.0)),
tf.keras.layers.ReLU()
])
layers.append(
tf.keras.layers.Conv1D(
reg_channel,
1,
use_bias=True,
data_format="channels_first",
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.001),
bias_initializer=tf.keras.initializers.Constant(0.0)))
self.reg_blocks = tf.keras.Sequential(layers)
self.proposal_target_layer = ProposalTargetLayer(**target_head)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3]
features = (tf.transpose(pc[..., 3:],
(0, 2, 1)) if pc.shape[-1] > 3 else None)
return xyz, features
def call(self,
roi_boxes3d,
gt_boxes3d,
rpn_xyz,
rpn_features,
seg_mask,
pts_depth,
training=True):
pts_extra_input_list = [tf.expand_dims(seg_mask, axis=2)]
pts_extra_input_list.append(
tf.expand_dims(pts_depth / 70.0 - 0.5, axis=2))
pts_extra_input = tf.concat(pts_extra_input_list, axis=2)
pts_feature = tf.concat((pts_extra_input, rpn_features), axis=2)
if gt_boxes3d is not None:
target = self.proposal_target_layer(
[roi_boxes3d, gt_boxes3d, rpn_xyz, pts_feature])
for k in target:
target[k] = tf.stop_gradient(target[k])
pts_input = tf.concat(
(target['sampled_pts'], target['pts_feature']), axis=2)
target['pts_input'] = pts_input
else:
pooled_features, pooled_empty_flag = roipool3d_utils.roipool3d_gpu(
rpn_xyz,
pts_feature,
roi_boxes3d,
self.pool_extra_width,
sampled_pt_num=self.num_points)
# canonical transformation
batch_size = roi_boxes3d.shape[0]
roi_center = roi_boxes3d[:, :, 0:3]
poss = []
for k in range(batch_size):
pos = pooled_features[k, :, :, :3] - tf.expand_dims(
roi_center[k], axis=1)
pos = rotate_pc_along_y_tf(pos, roi_boxes3d[k, :, 6])
poss.append(pos)
pooled_features = tf.concat(
[tf.stack(poss), pooled_features[:, :, :, 3:]], axis=3)
pts_input = tf.reshape(
pooled_features,
(-1, pooled_features.shape[2], pooled_features.shape[3]))
xyz, features = self._break_up_pc(pts_input)
xyz_input = tf.expand_dims(tf.transpose(
pts_input[..., 0:self.rcnn_input_channel], (0, 2, 1)),
axis=3)
xyz_feature = self.xyz_up_layer(xyz_input, training=training)
rpn_feature = tf.expand_dims(tf.transpose(
pts_input[..., self.rcnn_input_channel:], (0, 2, 1)),
axis=3)
merged_feature = tf.concat((xyz_feature, rpn_feature), axis=1)
merged_feature = self.merge_down_layer(merged_feature,
training=training)
l_xyz, l_features = [xyz], [tf.squeeze(merged_feature, axis=3)]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i],
l_features[i],
training=training)
l_xyz.append(li_xyz)
l_features.append(li_features)
rcnn_cls = tf.squeeze(tf.transpose(
self.cls_blocks(l_features[-1], training=training), (0, 2, 1)),
axis=1) # (B, 1 or 2)
rcnn_reg = tf.squeeze(tf.transpose(
self.reg_blocks(l_features[-1], training=training), (0, 2, 1)),
axis=1) # (B, C)
ret_dict = {'rois': roi_boxes3d, 'cls': rcnn_cls, 'reg': rcnn_reg}
if gt_boxes3d is not None:
ret_dict.update(target)
return ret_dict
def loss(self, results, inputs):
rcnn_cls = results['cls']
rcnn_reg = results['reg']
cls_label = tf.cast(results['cls_label'], tf.float32)
reg_valid_mask = results['reg_valid_mask']
gt_boxes3d_ct = results['gt_of_rois']
pts_input = results['pts_input']
cls_label_flat = tf.reshape(cls_label, (-1))
# binary cross entropy
rcnn_cls_flat = tf.reshape(rcnn_cls, (-1))
batch_loss_cls = tf.keras.losses.BinaryCrossentropy(reduction="none")(
tf.sigmoid(rcnn_cls_flat), cls_label)
cls_valid_mask = tf.cast((cls_label_flat >= 0), tf.float32)
rcnn_loss_cls = tf.reduce_sum(
batch_loss_cls * cls_valid_mask) / tf.maximum(
tf.reduce_sum(cls_valid_mask), 1.0)
# rcnn regression loss
batch_size = pts_input.shape[0]
fg_mask = (reg_valid_mask > 0)
fg_sum = tf.reduce_sum(tf.cast(fg_mask, tf.int64)).numpy()
if fg_sum != 0:
anchor_size = self.proposal_layer.mean_size
loss_loc, loss_angle, loss_size, reg_loss_dict = \
get_reg_loss(tf.reshape(rcnn_reg, (batch_size, -1))[fg_mask],
tf.reshape(gt_boxes3d_ct, (batch_size, 7))[fg_mask],
loc_scope=self.proposal_layer.loc_scope,
loc_bin_size=self.proposal_layer.loc_bin_size,
num_head_bin=self.proposal_layer.num_head_bin,
anchor_size=anchor_size,
get_xz_fine=True, get_y_by_bin=self.proposal_layer.get_y_by_bin,
loc_y_scope=self.proposal_layer.loc_y_scope, loc_y_bin_size=self.proposal_layer.loc_y_bin_size,
get_ry_fine=True)
loss_size = 3 * loss_size # consistent with old codes
rcnn_loss_reg = loss_loc + loss_angle + loss_size
else:
# Regression loss is zero when no point is classified as foreground.
rcnn_loss_reg = tf.reduce_mean(rcnn_reg * 0)
return {"cls": rcnn_loss_cls, "reg": rcnn_loss_reg}
def rotate_pc_along_y(pc, rot_angle):
"""
Args:
pc: (N, 3+C), (N, 3) is in the rectified camera coordinate.
rot_angle: rad scalar
Returns:
pc: updated pc with XYZ rotated.
"""
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc
class ProposalLayer(tf.keras.layers.Layer):
def __init__(self,
nms_pre=9000,
nms_post=512,
nms_thres=0.85,
nms_post_val=None,
nms_thres_val=None,
mean_size=[1.0],
loc_xz_fine=True,
loc_scope=3.0,
loc_bin_size=0.5,
num_head_bin=12,
get_y_by_bin=False,
get_ry_fine=False,
loc_y_scope=0.5,
loc_y_bin_size=0.25,
post_process=True):
super().__init__()
self.nms_pre = nms_pre
self.nms_post = nms_post
self.nms_thres = nms_thres
self.nms_post_val = nms_post_val
self.nms_thres_val = nms_thres_val
self.mean_size = tf.constant(mean_size)
self.loc_scope = loc_scope
self.loc_bin_size = loc_bin_size
self.num_head_bin = num_head_bin
self.loc_xz_fine = loc_xz_fine
self.get_y_by_bin = get_y_by_bin
self.get_ry_fine = get_ry_fine
self.loc_y_scope = loc_y_scope
self.loc_y_bin_size = loc_y_bin_size
self.post_process = post_process
def call(self, rpn_scores, rpn_reg, xyz, training=True):
batch_size = xyz.shape[0]
proposals = decode_bbox_target(
tf.reshape(xyz, (-1, xyz.shape[-1])),
tf.reshape(rpn_reg, (-1, rpn_reg.shape[-1])),
anchor_size=self.mean_size,
loc_scope=self.loc_scope,
loc_bin_size=self.loc_bin_size,
num_head_bin=self.num_head_bin,
get_xz_fine=self.loc_xz_fine,
get_y_by_bin=self.get_y_by_bin,
get_ry_fine=self.get_ry_fine,
loc_y_scope=self.loc_y_scope,
loc_y_bin_size=self.loc_y_bin_size) # (N, 7)
proposals = tf.reshape(proposals, (batch_size, -1, 7))
nms_post = self.nms_post
nms_thres = self.nms_thres
if not training:
if self.nms_post_val is not None:
nms_post = self.nms_post_val
if self.nms_thres_val is not None:
nms_thres = self.nms_thres_val
if self.post_process:
proposals = tf.concat([
proposals[..., :1], proposals[..., 1:2] +
proposals[..., 3:4] / 2, proposals[..., 2:]
],
axis=-1) # set y as the center of bottom
scores = rpn_scores
sorted_idxs = tf.argsort(scores, axis=1, direction="DESCENDING")
batch_size = scores.shape[0]
ret_bbox3d = []
ret_scores = []
for k in range(batch_size):
scores_single = scores[k]
proposals_single = proposals[k]
order_single = sorted_idxs[k]
scores_single, proposals_single = self.distance_based_proposal(
scores_single, proposals_single, order_single, training)
proposals_tot = proposals_single.shape[0]
ret_bbox3d.append(
tf.concat([
proposals_single,
tf.zeros((nms_post - proposals_tot, 7))
],
axis=0))
ret_scores.append(
tf.concat(
[scores_single,
tf.zeros((nms_post - proposals_tot,))],
axis=0))
ret_bbox3d = tf.stack(ret_bbox3d)
ret_scores = tf.stack(ret_scores)
else:
batch_size = rpn_scores.shape[0]
ret_bbox3d = []
ret_scores = []
for k in range(batch_size):
bev = xywhr_to_xyxyr(
tf.stack([proposals[k, :, i] for i in [0, 2, 3, 5, 6]],
axis=-1))
keep_idx = nms(bev, rpn_scores[k, :, 0], nms_thres)
ret_bbox3d.append(tf.gather(proposals[k], keep_idx))
ret_scores.append(tf.gather(rpn_scores[k], keep_idx))
return ret_bbox3d, ret_scores
    def distance_based_proposal(self, scores, proposals, order, training=True):
        """Propose ROIs in two distance ranges (0-40 m and 40-80 m) based on
        each proposal's distance from the sensor.
        Args:
            scores: (N)
            proposals: (N, 7)
            order: (N)
            training (bool): Whether we are in training mode.
        """
nms_post = self.nms_post
nms_thres = self.nms_thres
if not training:
if self.nms_post_val is not None:
nms_post = self.nms_post_val
if self.nms_thres_val is not None:
nms_thres = self.nms_thres_val
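        # For example, with the default nms_pre=9000 the pre-NMS budget below is
        # split 70/30: up to 6300 proposals from the 0-40 m range and up to 2700
        # from the 40-80 m range are kept before oriented NMS.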
nms_range_list = [0, 40.0, 80.0]
pre_top_n_list = [
0,
int(self.nms_pre * 0.7), self.nms_pre - int(self.nms_pre * 0.7)
]
post_top_n_list = [
0, int(nms_post * 0.7), nms_post - int(nms_post * 0.7)
]
scores_single_list, proposals_single_list = [], []
# sort by score
scores_ordered = tf.gather(scores, order)
proposals_ordered = tf.gather(proposals, order)
dist = proposals_ordered[:, 2]
first_mask = (dist > nms_range_list[0]) & (dist <= nms_range_list[1])
for i in range(1, len(nms_range_list)):
# get proposal distance mask
dist_mask = ((dist > nms_range_list[i - 1]) &
(dist <= nms_range_list[i]))
if tf.reduce_any(dist_mask):
# this area has points
# reduce by mask
cur_scores = scores_ordered[dist_mask]
cur_proposals = proposals_ordered[dist_mask]
# fetch pre nms top K
cur_scores = cur_scores[:pre_top_n_list[i]]
cur_proposals = cur_proposals[:pre_top_n_list[i]]
else:
assert i == 2, '%d' % i
# this area doesn't have any points, so use rois of first area
cur_scores = scores_ordered[first_mask]
cur_proposals = proposals_ordered[first_mask]
# fetch top K of first area
cur_scores = cur_scores[pre_top_n_list[i -
1]:][:pre_top_n_list[i]]
cur_proposals = cur_proposals[
pre_top_n_list[i - 1]:][:pre_top_n_list[i]]
# oriented nms
bev = xywhr_to_xyxyr(
tf.gather(cur_proposals, [0, 2, 3, 5, 6], axis=1))
keep_idx = nms(bev, cur_scores, nms_thres)
# Fetch post nms top k
keep_idx = keep_idx[:post_top_n_list[i]]
scores_single_list.append(tf.gather(cur_scores, keep_idx))
proposals_single_list.append(tf.gather(cur_proposals, keep_idx))
scores_single = tf.concat(scores_single_list, axis=0)
proposals_single = tf.concat(proposals_single_list, axis=0)
return scores_single, proposals_single
def decode_bbox_target(roi_box3d,
pred_reg,
loc_scope,
loc_bin_size,
num_head_bin,
anchor_size,
get_xz_fine=True,
get_y_by_bin=False,
loc_y_scope=0.5,
loc_y_bin_size=0.25,
get_ry_fine=False):
"""
Args:
roi_box3d: (N, 7)
pred_reg: (N, C)
loc_scope:
loc_bin_size:
num_head_bin:
anchor_size:
get_xz_fine:
get_y_by_bin:
loc_y_scope:
loc_y_bin_size:
get_ry_fine:
"""
per_loc_bin_num = int(loc_scope / loc_bin_size) * 2
loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2
# recover xz localization
x_bin_l, x_bin_r = 0, per_loc_bin_num
z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2
start_offset = z_bin_r
x_bin = tf.argmax(pred_reg[:, x_bin_l:x_bin_r], axis=1)
z_bin = tf.argmax(pred_reg[:, z_bin_l:z_bin_r], axis=1)
pos_x = tf.cast(x_bin,
tf.float32) * loc_bin_size + loc_bin_size / 2 - loc_scope
pos_z = tf.cast(z_bin,
tf.float32) * loc_bin_size + loc_bin_size / 2 - loc_scope
if get_xz_fine:
x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3
z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4
start_offset = z_res_r
x_res_norm = tf.gather(pred_reg[:, x_res_l:x_res_r],
x_bin,
batch_dims=1)
z_res_norm = tf.gather(pred_reg[:, z_res_l:z_res_r],
z_bin,
batch_dims=1)
x_res = x_res_norm * loc_bin_size
z_res = z_res_norm * loc_bin_size
pos_x += x_res
pos_z += z_res
# recover y localization
if get_y_by_bin:
y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num
y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num
start_offset = y_res_r
y_bin = tf.argmax(pred_reg[:, y_bin_l:y_bin_r], axis=1)
y_res_norm = tf.gather(pred_reg[:, y_res_l:y_res_r],
y_bin,
batch_dims=1)
y_res = y_res_norm * loc_y_bin_size
pos_y = tf.cast(
y_bin, tf.float32
) * loc_y_bin_size + loc_y_bin_size / 2 - loc_y_scope + y_res
pos_y = pos_y + roi_box3d[:, 1]
else:
y_offset_l, y_offset_r = start_offset, start_offset + 1
start_offset = y_offset_r
pos_y = roi_box3d[:, 1] + pred_reg[:, y_offset_l]
# recover ry rotation
ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
ry_bin = tf.argmax(pred_reg[:, ry_bin_l:ry_bin_r], axis=1)
ry_res_norm = tf.gather(pred_reg[:, ry_res_l:ry_res_r],
ry_bin,
batch_dims=1)
if get_ry_fine:
# divide pi/2 into several bins
angle_per_class = (np.pi / 2) / num_head_bin
ry_res = ry_res_norm * (angle_per_class / 2)
ry = (tf.cast(ry_bin, tf.float32) * angle_per_class +
angle_per_class / 2) + ry_res - np.pi / 4
else:
angle_per_class = (2 * np.pi) / num_head_bin
ry_res = ry_res_norm * (angle_per_class / 2)
# bin_center is (0, 30, 60, 90, 120, ..., 270, 300, 330)
ry = (tf.cast(ry_bin, tf.float32) * angle_per_class + ry_res) % (2 *
np.pi)
ry = tf.where(ry > np.pi, ry - 2 * np.pi, ry)
# recover size
size_res_l, size_res_r = ry_res_r, ry_res_r + 3
assert size_res_r == pred_reg.shape[1]
size_res_norm = pred_reg[:, size_res_l:size_res_r]
hwl = size_res_norm * anchor_size + anchor_size
# shift to original coords
roi_center = roi_box3d[:, 0:3]
shift_ret_box3d = tf.concat(
(tf.reshape(pos_x, (-1, 1)), tf.reshape(
pos_y, (-1, 1)), tf.reshape(pos_z,
(-1, 1)), hwl, tf.reshape(ry, (-1, 1))),
axis=1)
ret_box3d = shift_ret_box3d
if roi_box3d.shape[1] == 7:
roi_ry = roi_box3d[:, 6:7]
ret_box3d = rotate_pc_along_y_tf(shift_ret_box3d, -roi_ry)
ret_box3d = tf.concat([ret_box3d[:, :6], ret_box3d[:, 6:7] + roi_ry],
axis=1)
ret_box3d = tf.concat([
ret_box3d[:, :1] + roi_center[:, :1], ret_box3d[:, 1:2],
ret_box3d[:, 2:3] + roi_center[:, 2:3], ret_box3d[:, 3:]
],
axis=1)
return ret_box3d
def rotate_pc_along_y_tf(pc, rot_angle):
"""
:param pc: (N, 3 + C)
:param rot_angle: (N)
:return:
"""
cosa = tf.reshape(tf.cos(rot_angle), (-1, 1)) # (N, 1)
sina = tf.reshape(tf.sin(rot_angle), (-1, 1)) # (N, 1)
raw_1 = tf.concat([cosa, -sina], axis=1) # (N, 2)
raw_2 = tf.concat([sina, cosa], axis=1) # (N, 2)
R = tf.concat(
(tf.expand_dims(raw_1, axis=1), tf.expand_dims(raw_2, axis=1)),
axis=1) # (N, 2, 2)
pc_temp = tf.reshape(tf.stack([pc[..., 0], pc[..., 2]], axis=-1),
((pc.shape[0], -1, 2))) # (N, 512, 2)
pc_temp = tf.matmul(pc_temp, tf.transpose(R, (0, 2, 1)))
pc_temp = tf.reshape(pc_temp, (pc.shape[:-1] + (2,))) # (N, 512, 2)
pc = tf.concat(
[pc_temp[..., :1], pc[..., 1:2], pc_temp[..., 1:2], pc[..., 3:]],
axis=-1)
return pc
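# This is the batched TensorFlow counterpart of rotate_pc_along_y above: each of
# the N point sets is rotated about the Y axis by its own angle rot_angle[n],
# again touching only the x and z columns.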
class ProposalTargetLayer(tf.keras.layers.Layer):
def __init__(self,
pool_extra_width=1.0,
num_points=512,
reg_fg_thresh=0.55,
cls_fg_thresh=0.6,
cls_bg_thresh=0.45,
cls_bg_thresh_lo=0.05,
fg_ratio=0.5,
roi_per_image=64,
aug_rot_range=18,
hard_bg_ratio=0.8,
roi_fg_aug_times=10):
super().__init__()
self.pool_extra_width = pool_extra_width
self.num_points = num_points
self.reg_fg_thresh = reg_fg_thresh
self.cls_fg_thresh = cls_fg_thresh
self.cls_bg_thresh = cls_bg_thresh
self.cls_bg_thresh_lo = cls_bg_thresh_lo
self.fg_ratio = fg_ratio
self.roi_per_image = roi_per_image
self.aug_rot_range = aug_rot_range
self.hard_bg_ratio = hard_bg_ratio
self.roi_fg_aug_times = roi_fg_aug_times
def call(self, x):
roi_boxes3d, gt_boxes3d, rpn_xyz, pts_feature = x
batch_rois, batch_gt_of_rois, batch_roi_iou = self.sample_rois_for_rcnn(
roi_boxes3d, gt_boxes3d)
# point cloud pooling
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, self.pool_extra_width,
sampled_pt_num=self.num_points)
sampled_pts, sampled_features = pooled_features[:, :, :, 0:
3], pooled_features[:, :, :,
3:]
# data augmentation
sampled_pts, batch_rois, batch_gt_of_rois = \
self.data_augmentation(sampled_pts, batch_rois, batch_gt_of_rois)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_ry = batch_rois[:, :, 6:7] % (2 * np.pi)
roi_center = batch_rois[:, :, 0:3]
sampled_pts = sampled_pts - tf.expand_dims(roi_center,
axis=2) # (B, M, 512, 3)
batch_gt_of_rois = tf.concat([
batch_gt_of_rois[:, :, :3] - roi_center,
batch_gt_of_rois[:, :, 3:6], batch_gt_of_rois[:, :, 6:] - roi_ry
],
axis=2)
sampled_pts = tf.unstack(sampled_pts)
batch_gt_of_rois = tf.unstack(batch_gt_of_rois)
for k in range(batch_size):
sampled_pts[k] = rotate_pc_along_y_tf(sampled_pts[k],
batch_rois[k, :, 6])
batch_gt_of_rois[k] = tf.squeeze(rotate_pc_along_y_tf(
tf.expand_dims(batch_gt_of_rois[k], axis=1), roi_ry[k]),
axis=1)
sampled_pts = tf.stack(sampled_pts)
batch_gt_of_rois = tf.stack(batch_gt_of_rois)
# regression valid mask
valid_mask = (pooled_empty_flag == 0)
reg_valid_mask = tf.cast(
((batch_roi_iou > self.reg_fg_thresh) & valid_mask), tf.int64)
# classification label
batch_cls_label = tf.cast((batch_roi_iou > self.cls_fg_thresh),
tf.int64)
invalid_mask = (batch_roi_iou > self.cls_bg_thresh) & (
batch_roi_iou < self.cls_fg_thresh)
batch_cls_label = tf.where(
tf.reduce_any([tf.logical_not(valid_mask), invalid_mask], axis=0),
-1, batch_cls_label)
output_dict = {
'sampled_pts':
tf.reshape(sampled_pts, (-1, self.num_points, 3)),
'pts_feature':
tf.reshape(sampled_features,
(-1, self.num_points, sampled_features.shape[3])),
'cls_label':
tf.reshape(batch_cls_label, (-1)),
'reg_valid_mask':
tf.reshape(reg_valid_mask, (-1)),
'gt_of_rois':
tf.reshape(batch_gt_of_rois, (-1, 7)),
'gt_iou':
tf.reshape(batch_roi_iou, (-1)),
'roi_boxes3d':
tf.reshape(batch_rois, (-1, 7))
}
return output_dict
def sample_rois_for_rcnn(self, roi_boxes3d, gt_boxes3d):
"""
Args:
roi_boxes3d: (B, M, 7)
gt_boxes3d: (B, N, 8) [x, y, z, h, w, l, ry, cls]
Returns:
batch_rois: (B, N, 7)
batch_gt_of_rois: (B, N, 8)
batch_roi_iou: (B, N)
"""
batch_size = roi_boxes3d.shape[0]
fg_rois_per_image = int(np.round(self.fg_ratio * self.roi_per_image))
batch_rois, batch_gt_of_rois, batch_roi_iou = [], [], []
for idx in range(batch_size):
cur_roi, cur_gt = roi_boxes3d[idx], gt_boxes3d[idx]
            k = len(cur_gt) - 1
while tf.reduce_sum(cur_gt[k]) == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
# include gt boxes in the candidate rois
iou3d = iou_3d(cur_roi.numpy()[:, [0, 1, 2, 5, 3, 4, 6]],
cur_gt[:,
0:7].numpy()[:,
[0, 1, 2, 5, 3, 4, 6]]) # (M, N)
iou3d = tf.constant(iou3d)
gt_assignment = tf.argmax(iou3d, axis=1)
max_overlaps = tf.gather(iou3d, gt_assignment, batch_dims=1)
# sample fg, easy_bg, hard_bg
fg_thresh = min(self.reg_fg_thresh, self.cls_fg_thresh)
fg_inds = tf.reshape(tf.where((max_overlaps >= fg_thresh)), (-1))
# TODO: this will mix the fg and bg when CLS_BG_THRESH_LO < iou < CLS_BG_THRESH
# fg_inds = tf.concat((fg_inds, roi_assignment), axis=0) # consider the roi which has max_iou with gt as fg
easy_bg_inds = tf.reshape(
tf.where((max_overlaps < self.cls_bg_thresh_lo)), (-1))
hard_bg_inds = tf.reshape(
tf.where((max_overlaps < self.cls_bg_thresh) &
(max_overlaps >= self.cls_bg_thresh_lo)), (-1))
            fg_num_rois = fg_inds.shape[0]
            bg_num_rois = hard_bg_inds.shape[0] + easy_bg_inds.shape[0]
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = tf.constant(np.random.permutation(fg_num_rois),
dtype=tf.int64)
fg_inds = tf.gather(fg_inds, rand_num[:fg_rois_per_this_image])
# sampling bg
bg_rois_per_this_image = self.roi_per_image - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,
bg_rois_per_this_image)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(
np.random.rand(self.roi_per_image) * fg_num_rois)
rand_num = tf.constant(rand_num, dtype=tf.int64)
                fg_inds = tf.gather(fg_inds, rand_num)
fg_rois_per_this_image = self.roi_per_image
bg_rois_per_this_image = 0
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = self.roi_per_image
bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,
bg_rois_per_this_image)
fg_rois_per_this_image = 0
            else:
                raise NotImplementedError
# augment the rois by noise
roi_list, roi_iou_list, roi_gt_list = [], [], []
if fg_rois_per_this_image > 0:
fg_rois_src = tf.gather(cur_roi, fg_inds)
gt_of_fg_rois = tf.gather(cur_gt,
tf.gather(gt_assignment, fg_inds))
iou3d_src = tf.gather(max_overlaps, fg_inds)
fg_rois, fg_iou3d = self.aug_roi_by_noise_torch(
fg_rois_src,
gt_of_fg_rois,
iou3d_src,
aug_times=self.roi_fg_aug_times)
roi_list.append(fg_rois)
roi_iou_list.append(fg_iou3d)
roi_gt_list.append(gt_of_fg_rois)
if bg_rois_per_this_image > 0:
bg_rois_src = tf.gather(cur_roi, bg_inds)
gt_of_bg_rois = tf.gather(cur_gt,
tf.gather(gt_assignment, bg_inds))
iou3d_src = tf.gather(max_overlaps, bg_inds)
aug_times = 1 if self.roi_fg_aug_times > 0 else 0
bg_rois, bg_iou3d = self.aug_roi_by_noise_torch(
bg_rois_src, gt_of_bg_rois, iou3d_src, aug_times=aug_times)
roi_list.append(bg_rois)
roi_iou_list.append(bg_iou3d)
roi_gt_list.append(gt_of_bg_rois)
rois = tf.concat(roi_list, axis=0)
iou_of_rois = tf.concat(roi_iou_list, axis=0)
gt_of_rois = tf.concat(roi_gt_list, axis=0)
batch_rois.append(rois)
batch_gt_of_rois.append(gt_of_rois)
batch_roi_iou.append(iou_of_rois)
return tf.stack(batch_rois), tf.stack(batch_gt_of_rois), tf.stack(
batch_roi_iou)
def sample_bg_inds(self, hard_bg_inds, easy_bg_inds,
bg_rois_per_this_image):
        if hard_bg_inds.shape[0] > 0 and easy_bg_inds.shape[0] > 0:
            hard_bg_rois_num = int(bg_rois_per_this_image * self.hard_bg_ratio)
            easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
            # sampling hard bg
            rand_idx = tf.constant(np.random.randint(
                low=0, high=hard_bg_inds.shape[0], size=(hard_bg_rois_num,)),
                                   dtype=tf.int64)
            hard_bg_inds = tf.gather(hard_bg_inds, rand_idx)
            # sampling easy bg
            rand_idx = tf.constant(np.random.randint(
                low=0, high=easy_bg_inds.shape[0], size=(easy_bg_rois_num,)),
                                   dtype=tf.int64)
            easy_bg_inds = tf.gather(easy_bg_inds, rand_idx)
            bg_inds = tf.concat([hard_bg_inds, easy_bg_inds], axis=0)
        elif hard_bg_inds.shape[0] > 0 and easy_bg_inds.shape[0] == 0:
            hard_bg_rois_num = bg_rois_per_this_image
            # sampling hard bg
            rand_idx = tf.constant(np.random.randint(
                low=0, high=hard_bg_inds.shape[0], size=(hard_bg_rois_num,)),
                                   dtype=tf.int64)
            bg_inds = tf.gather(hard_bg_inds, rand_idx)
        elif hard_bg_inds.shape[0] == 0 and easy_bg_inds.shape[0] > 0:
            easy_bg_rois_num = bg_rois_per_this_image
            # sampling easy bg
            rand_idx = tf.constant(np.random.randint(
                low=0, high=easy_bg_inds.shape[0], size=(easy_bg_rois_num,)),
                                   dtype=tf.int64)
            bg_inds = tf.gather(easy_bg_inds, rand_idx)
else:
raise NotImplementedError
return bg_inds
def aug_roi_by_noise_torch(self,
roi_boxes3d,
gt_boxes3d,
iou3d_src,
aug_times=10):
pos_thresh = min(self.reg_fg_thresh, self.cls_fg_thresh)
aug_boxes = []
iou_of_rois = []
for k in range(roi_boxes3d.shape[0]):
temp_iou = cnt = 0
roi_box3d = roi_boxes3d[k]
gt_box3d = tf.reshape(gt_boxes3d[k], (1, 7))
aug_box3d = roi_box3d
keep = True
while temp_iou < pos_thresh and cnt < aug_times:
if np.random.rand() < 0.2:
aug_box3d = roi_box3d # p=0.2 to keep the original roi box
keep = True
else:
aug_box3d = self.random_aug_box3d(roi_box3d)
keep = False
aug_box3d = tf.reshape(aug_box3d, ((1, 7)))
iou3d = iou_3d(aug_box3d.numpy()[:, [0, 1, 2, 5, 3, 4, 6]],
gt_box3d.numpy()[:, [0, 1, 2, 5, 3, 4, 6]])
iou3d = tf.constant(iou3d)
temp_iou = iou3d[0][0]
cnt += 1
aug_boxes.append(tf.reshape(aug_box3d, (-1)))
if cnt == 0 or keep:
iou_of_rois.append(iou3d_src[k])
else:
iou_of_rois.append(temp_iou)
return tf.stack(aug_boxes), tf.stack(iou_of_rois)
@staticmethod
def random_aug_box3d(box3d):
"""
Random shift, scale, orientation.
Args:
box3d: (7) [x, y, z, h, w, l, ry]
"""
# pos_range, hwl_range, angle_range, mean_iou
range_config = [[0.2, 0.1, np.pi / 12,
0.7], [0.3, 0.15, np.pi / 12, 0.6],
[0.5, 0.15, np.pi / 9,
0.5], [0.8, 0.15, np.pi / 6, 0.3],
[1.0, 0.15, np.pi / 3, 0.2]]
idx = tf.constant(np.random.randint(low=0,
high=len(range_config),
size=(1,))[0],
dtype=tf.int64)
pos_shift = ((tf.random.uniform(
(3,)) - 0.5) / 0.5) * range_config[idx][0]
hwl_scale = ((tf.random.uniform(
(3,)) - 0.5) / 0.5) * range_config[idx][1] + 1.0
angle_rot = ((tf.random.uniform(
(1,)) - 0.5) / 0.5) * range_config[idx][2]
aug_box3d = tf.concat([
box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale,
box3d[6:7] + angle_rot
],
axis=0)
return aug_box3d
def data_augmentation(self, pts, rois, gt_of_rois):
"""
Args:
pts: (B, M, 512, 3)
            rois: (B, M, 7)
gt_of_rois: (B, M, 7)
"""
batch_size, boxes_num = pts.shape[0], pts.shape[1]
# rotation augmentation
        angles = ((tf.random.uniform(
            (batch_size, boxes_num)) - 0.5) / 0.5) * (np.pi / self.aug_rot_range)
# calculate gt alpha from gt_of_rois
temp_x, temp_z, temp_ry = gt_of_rois[:, :,
0], gt_of_rois[:, :,
2], gt_of_rois[:, :,
6]
temp_beta = tf.atan2(temp_z, temp_x)
gt_alpha = -tf.sign(
temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)
temp_x, temp_z, temp_ry = rois[:, :, 0], rois[:, :, 2], rois[:, :, 6]
temp_beta = tf.atan2(temp_z, temp_x)
roi_alpha = -tf.sign(
temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)
pts = tf.unstack(pts)
gt_of_rois = tf.unstack(gt_of_rois)
rois = tf.unstack(rois)
for k in range(batch_size):
pts[k] = rotate_pc_along_y_tf(pts[k], angles[k])
gt_of_rois[k] = tf.squeeze(rotate_pc_along_y_tf(
tf.expand_dims(gt_of_rois[k], axis=1), angles[k]),
axis=1)
rois[k] = tf.squeeze(rotate_pc_along_y_tf(
tf.expand_dims(rois[k], axis=1), angles[k]),
axis=1)
pts = tf.stack(pts)
gt_of_rois = tf.stack(gt_of_rois)
rois = tf.stack(rois)
# calculate the ry after rotation
temp_x, temp_z = gt_of_rois[:, :, :1], gt_of_rois[:, :, 2:3]
temp_beta = tf.atan2(temp_z, temp_x)
gt_of_rois = tf.concat([
gt_of_rois[:, :, :6],
tf.sign(temp_beta) * np.pi / 2 + tf.expand_dims(gt_alpha, axis=-1) -
temp_beta
],
axis=2)
temp_x, temp_z = rois[:, :, :1], rois[:, :, 2:3]
temp_beta = tf.atan2(temp_z, temp_x)
rois = tf.concat([
rois[:, :, :6],
tf.sign(temp_beta) * np.pi / 2 +
tf.expand_dims(roi_alpha, axis=-1) - temp_beta
],
axis=2)
# scaling augmentation
scales = 1 + ((tf.random.uniform(
(batch_size, boxes_num)) - 0.5) / 0.5) * 0.05
pts = pts * tf.expand_dims(tf.expand_dims(scales, axis=2), axis=3)
gt_of_rois = tf.concat([
gt_of_rois[:, :, :6] * tf.expand_dims(scales, axis=2),
gt_of_rois[:, :, 6:]
],
axis=2)
rois = tf.concat(
[rois[:, :, :6] * tf.expand_dims(scales, axis=2), rois[:, :, 6:]],
axis=2)
# flip augmentation
flip_flag = tf.sign(tf.random.uniform((batch_size, boxes_num, 1)) - 0.5)
pts = tf.concat([
pts[:, :, :, :1] * tf.expand_dims(flip_flag, axis=3), pts[:, :, :,
1:]
],
axis=3)
gt_of_rois = tf.concat(
[gt_of_rois[:, :, :1] * flip_flag, gt_of_rois[:, :, 1:]], axis=2)
# flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry
src_ry = gt_of_rois[:, :, 6:7]
ry = tf.cast((flip_flag == 1), tf.float32) * src_ry + tf.cast(
(flip_flag == -1), tf.float32) * (tf.sign(src_ry) * np.pi - src_ry)
gt_of_rois = tf.concat([gt_of_rois[:, :, :6], ry], axis=2)
rois = tf.concat([rois[:, :, :1] * flip_flag, rois[:, :, 1:]], axis=2)
# flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry
src_ry = rois[:, :, 6:7]
ry = tf.cast((flip_flag == 1), tf.float32) * src_ry + tf.cast(
(flip_flag == -1), tf.float32) * (tf.sign(src_ry) * np.pi - src_ry)
rois = tf.concat([rois[:, :, :6], ry], axis=2)
return pts, rois, gt_of_rois
|
[
"noreply@github.com"
] |
kukuruza.noreply@github.com
|
d97160120fe344b6a36a79e9f1c2c576b060b8b9
|
45623eab5e69c0f2a3d7f0a141d112d0d35790f9
|
/ssbench/ordered_dict.py
|
d5f0aca7471d01e9cc56f2c1f2d1beb144d2e2df
|
[
"Apache-2.0"
] |
permissive
|
peteryj/ssbench
|
e73c2a77d597152877c7b3a022ce3fa77363beef
|
4a1766f8e3287cb0dafa559d24f6a51d64950efc
|
refs/heads/master
| 2020-12-03T03:47:13.510496
| 2016-03-23T06:37:39
| 2016-03-23T06:37:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,058
|
py
|
# {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and
# pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular
# dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked
# list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
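    # For example, after od['a'] = 1 and od['b'] = 2 the circular list reads
    #   root <-> link('a') <-> link('b') <-> root,
    # so iterating in insertion order just follows the NEXT pointers from root.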
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the
# linked list, and the inherited dictionary is updated with the new
# key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor
# nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if
false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update w/o breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the
corresponding value.
If key is not found, d is returned if given, otherwise KeyError is
raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running=None):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if _repr_running is None:
_repr_running = {}
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is
order-sensitive while comparison to a regular mapping is
order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self) == len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
# end of http://code.activestate.com/recipes/576693/ }}}
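# Example usage: OrderedDict([('a', 1), ('b', 2)]).keys() returns ['a', 'b'],
# preserving insertion order (unlike a plain dict on these older interpreters).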
|
[
"darrell@swiftstack.com"
] |
darrell@swiftstack.com
|
03d4807bf6ae79a977ee60b6b4de35c94aeb6e7f
|
88a5dae03f0304d3fb7add71855d2ddc6d8e28e3
|
/main/ext/__init__.py
|
362e9cace53732e41d9341d5e951472eba630fbc
|
[
"Apache-2.0"
] |
permissive
|
huangpd/Shape
|
eabb59781ac6a055f7b7036fef926023cbcd4882
|
fddbbb765e353584752066f7c839293ebd10c4df
|
refs/heads/master
| 2020-03-26T13:04:22.224367
| 2018-05-10T09:06:10
| 2018-05-10T09:06:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#-*-coding:utf-8-*-
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt()
from flask_bootstrap import Bootstrap
bootstrap = Bootstrap()
from flask_mail import Mail
mail=Mail()
from flask_login import LoginManager
login_manager = LoginManager()
login_manager.login_view="auth.login_index"
login_manager.session_protection="strong"
login_manager.login_message="登录以获得更多功能"  # "Log in to unlock more features"
login_manager.login_message_category="info"
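# These extension objects are created unbound here; in an application factory
# they would typically be attached later, e.g. bcrypt.init_app(app),
# bootstrap.init_app(app), mail.init_app(app) and login_manager.init_app(app).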
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
fec5927f671f48d0494d8758f058e97cbe129c94
|
0353782639974c650fa042e44d75e92bf7be6fc1
|
/instagram/insta/instafeed/views.py
|
cccd2cf5f41d8e68ff52bb8847187c85cb8c062f
|
[] |
no_license
|
jersobh/DigitalMarketing
|
2d31b5c18f0764c4f352947aa34506d63216feeb
|
6fa679bb964e6ad656415e38227e007db2ae0fda
|
refs/heads/master
| 2021-10-23T15:24:01.939739
| 2019-03-18T12:27:21
| 2019-03-18T12:27:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# import InstagramAPI
from InstagramAPI.InstagramAPI import InstagramAPI
from django.http import JsonResponse
def index(request):
api = InstagramAPI("jayabal.al", "jayabal9890@insta")
if(api.login()):
api.getSelfUserFeed()
return JsonResponse(api.LastJson)
return JsonResponse({})
# return HttpResponse("Hello, world. You're at the polls index.")
|
[
"noreply@github.com"
] |
jersobh.noreply@github.com
|
17b24db22bb599a33ad96d9be8572468f4ea1b60
|
42c67fdb3b373e0bf677e9d9d1cf770646c75ba6
|
/tests/test_tutorial/test_using_click/test_tutorial003.py
|
eadd93ee9ea77c644c295b94cd9155c13c10334d
|
[
"MIT"
] |
permissive
|
marcosfelt/typer
|
b8ecc8e65c82044076880105d3ecb2ca0d158c25
|
61a0616ea9b7904c2379c464d0f72d5b7bde270e
|
refs/heads/master
| 2023-08-01T03:25:42.417233
| 2021-09-14T11:11:12
| 2021-09-14T11:11:12
| 406,322,395
| 0
| 0
|
MIT
| 2021-09-14T10:43:32
| 2021-09-14T10:29:20
| null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
import subprocess
from click.testing import CliRunner
from docs_src.using_click import tutorial003 as mod
runner = CliRunner()
def test_cli():
result = runner.invoke(mod.typer_click_object, [])
# TODO: when deprecating Click 7, remove second option
assert "Error: Missing command" in result.stdout or "Usage" in result.stdout
def test_typer():
result = runner.invoke(mod.typer_click_object, ["top"])
assert "The Typer app is at the top level" in result.stdout
def test_click():
result = runner.invoke(mod.typer_click_object, ["hello", "--name", "Camila"])
assert "Hello Camila!" in result.stdout
def test_script():
result = subprocess.run(
["coverage", "run", mod.__file__, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Usage" in result.stdout
|
[
"noreply@github.com"
] |
marcosfelt.noreply@github.com
|
b1dc61b9b0266ed2642cd5bf9517f09540601de5
|
7abb3d309a011a36247e0b4dcda3759537c45b2c
|
/utils/vb-meta-to-json-topology.py
|
031f8c9a3763b172b8281d83709ffc18311a4b0b
|
[
"BSD-3-Clause"
] |
permissive
|
TomPlano/varbench
|
7937a8a7221117e2d817549eb8ba22746c324869
|
83933380e1876da388dd07a78e554e65f388861b
|
refs/heads/master
| 2020-04-02T14:34:11.376400
| 2018-10-27T19:10:09
| 2018-10-27T19:10:09
| 154,529,766
| 0
| 0
|
BSD-3-Clause
| 2018-10-24T16:01:55
| 2018-10-24T16:01:54
| null |
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
#!/usr/bin/env python
import os
import sys
import getopt
import json
def usage(argv, exit=None):
print "Usage: %s [OPTIONS] <VB metadata file> <VB JSON topology file (output)>" % argv[0]
print " -h (--help) : print help and exit"
print " -v (--vbs-path=) : path to VB Stats python module"
if exit is not None:
sys.exit(exit)
def parse_cmd_line(argc, argv):
opts = []
args = []
cur_path = os.path.dirname(os.path.realpath(__file__))
vb_path = cur_path + "/../vb-stats/"
try:
opts, args = getopt.getopt(
argv[1:],
"hv:",
["help", "vb-path="]
)
except getopt.GetoptError, err:
print >> sys.stderr, err
usage(argv, exit=1)
for o, a in opts:
if o in ("-h", "--help"):
usage(argv, exit=0)
elif o in ("-v", "--vb-path"):
vb_path = a
else:
usage(argv, exit=1)
if len(args) != 2:
usage(argv, exit=1)
return vb_path, args[0], args[1]
def main(argc, argv, envp):
vb_path, meta, json_file = parse_cmd_line(argc, argv)
procs = []
# Try to import vb-path
try:
sys.path.insert(0, vb_path)
from vb_stats import VB_Stats as vbs
except ImportError:
print >> sys.stderr, "Could not import VB_Stats. Please specify path to VB_Stats with '--vbs-path'"
usage(argv, exit=2)
with vbs(meta, load_data=False) as vb:
with open(json_file, "w") as f:
num_processors = vb.num_sockets_per_node * vb.num_cores_per_socket * vb.num_hw_threads_per_core
json.dump({
"processor_info" : {
"num_processors" : num_processors,
"num_sockets" : vb.num_sockets_per_node,
"cores_per_socket" : vb.num_cores_per_socket,
"hw_threads_per_core" : vb.num_hw_threads_per_core
},
# The format of p: [socket, core, hw_thread, os_core]
"processor_list" : [
{
"os_core" : p[3],
"socket" : p[0],
"core" : p[1],
"hw_thread" : p[2]
} for p in vb.processor_map
]
}, f, indent=4)
if __name__ == "__main__":
argv = sys.argv
argc = len(argv)
envp = os.environ
sys.exit(main(argc, argv, envp))
|
[
"brian.kocoloski@wustl.edu"
] |
brian.kocoloski@wustl.edu
|
70eca39c9c9fb18923b83761478de0f263f2fd31
|
fa6caa2382c1f35931153ba0c74ff6555c41c745
|
/backend/base/migrations/0003_product_image.py
|
7be4d74d30250643ca2e68b0d7c8b84ed019b757
|
[] |
no_license
|
devckrishna/Django-React-Ecommerce
|
89c341d0de469ed80939fec9544f56418a09ad90
|
ded75edbff25cfb2bca56c92ae5fce7fcf8afcb6
|
refs/heads/main
| 2023-04-21T22:36:28.720240
| 2021-05-08T17:44:22
| 2021-05-08T17:44:22
| 363,849,001
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Generated by Django 3.2 on 2021-05-04 14:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0002_order_orderitem_review_shippingaddress'),
]
operations = [
migrations.AddField(
model_name='product',
name='image',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
|
[
"dev.c.krishna.123@gmail.com"
] |
dev.c.krishna.123@gmail.com
|
150aa7fcfcf1929a708e94bb4cf3c21158724349
|
99e25489e0e504a6e49da4d9398dbfb8c4fe86a4
|
/Leetcode/二叉树/654-最大二叉树-m.py
|
2c6bc91855e00adb4cedcdeb219ef233ec75e6b9
|
[
"Apache-2.0"
] |
permissive
|
JackeyGuo/Algorithms
|
08e5c5a1067c1bf2642241ad635c683c68dff6d3
|
27185d382a891f4667f67701a60c796fa3a6c1ac
|
refs/heads/main
| 2023-03-27T15:26:28.383100
| 2021-03-18T06:27:05
| 2021-03-18T06:27:05
| 326,852,774
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
"""
        First decide what the root node has to do: for tree-construction
        problems, the root's job is simply to construct itself.
        """
        # base case
        if len(nums) == 0: return None
        if len(nums) == 1: return TreeNode(nums[0])
        # Step 1: find the maximum value in the array and its index
        max_value = max(nums)
        index = nums.index(max_value)
        # create the root node
        root = TreeNode(max_value)
        # recursively build the left and right subtrees
root.left = self.constructMaximumBinaryTree(nums[:index])
root.right = self.constructMaximumBinaryTree(nums[index + 1:])
return root
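# Worked example (matches the call below): for [3, 2, 1, 6, 0, 5] the maximum 6
# (at index 3) becomes the root, the left subtree is built from [3, 2, 1] and
# the right subtree from [0, 5].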
print(Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5]))
|
[
"1051347391@qq.com"
] |
1051347391@qq.com
|
4dc75a5c5ad9b9adc0eee92205b2a3ec96120685
|
1a220abd21c56728aa3368534506bfc9ced8ad46
|
/프로그래머스/lv0/120862. 최댓값 만들기 (2)/최댓값 만들기 (2).py
|
2150e823f28bad1d9f1692f23f12517ff6e88e54
|
[] |
no_license
|
JeonJe/Algorithm
|
0ff0cbf47900e7877be077e1ffeee0c1cd50639a
|
6f8da6dbeef350f71b7c297502a37f87eb7d0823
|
refs/heads/main
| 2023-08-23T11:08:17.781953
| 2023-08-23T08:31:41
| 2023-08-23T08:31:41
| 197,085,186
| 0
| 0
| null | 2023-02-21T03:26:41
| 2019-07-15T23:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
def solution(numbers):
answer = 0
negative = []
positive = []
for i in numbers:
if i < 0:
negative.append(i)
else:
positive.append(i)
negative.sort()
positive.sort()
max_positive, max_negative, mix = -1e9, -1e9, -1e9
if len(positive) == 1 and len(negative) == 1:
mix = positive[-1] * negative[0]
if len(positive) >= 2:
max_positive = positive[-1] * positive[-2]
if len(negative) >= 2:
max_negative = negative[0] * negative[1]
answer = max(max_positive, max_negative, mix)
return answer
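# For example, solution([-4, -3, 1, 2]) returns 12: the two most negative
# numbers give (-4) * (-3) = 12, beating the best positive product 1 * 2 = 2.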
|
[
"43032391+JeonJe@users.noreply.github.com"
] |
43032391+JeonJe@users.noreply.github.com
|
16f6244485e0802abe75dcdcc1068f2bde02f77f
|
70da894645a6f3fe362a60de843b1998e2d619eb
|
/Questao7.py
|
839a87331b7746d5e1badb8dbfcb0b2368f9e6e3
|
[] |
no_license
|
marcelorvergara/AT_python
|
2ed9ff3a782ec7b13f1f05909870d7a9013fb20b
|
77cfc84e9e1b624e45a2e3f45e0bb99b32170f68
|
refs/heads/main
| 2023-08-05T00:45:27.688566
| 2021-09-20T16:27:51
| 2021-09-20T16:27:51
| 408,164,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,329
|
py
|
import threading
import requests
class Questao7(threading.Thread):
def __init__(self):
super().__init__()
url = 'https://sites.google.com/site/dr2fundamentospython/arquivos/Winter_Olympics_Medals.csv'
requisicao = requests.get(url, timeout=5)
if requisicao.status_code != 200:
requisicao.raise_for_status()
else:
print("Conectado")
csv = requisicao.text
linhas = csv.splitlines()
# SWE
suecia = 0
sue_medalhas = []
# DEN
dinamarca = 0
den_medalhas = []
# NOR
nor_medalhas = []
noruega = 0
        for ln in range(1, len(linhas)):
            colunas = linhas[ln].split(',')
            # only the 21st century
            if int(colunas[0]) > 2000:
                # only the 'Curling', 'Skating', 'Skiing' and 'Ice Hockey' events
                if colunas[2] == 'Curling' or colunas[2] == 'Skating' or colunas[2] == 'Skiing' or colunas[2] == 'Ice Hockey':
                    # gold medals only
                    if colunas[7] == 'Gold':
gen = ''
if colunas[6] == 'M':
gen = 'masculino'
else:
gen = 'feminino'
if colunas[4] == 'SWE':
suecia += 1
sue_medalhas.append('Esporte: ' + colunas[2] + ' Ano: ' + colunas[0] + ' Cidade: ' + colunas[
1] + ' Gênero: ' + gen)
elif colunas[4] == 'DEN':
dinamarca += 1
den_medalhas.append('Esporte: ' + colunas[2] + ' Ano: ' + colunas[0] + ' Cidade: ' + colunas[
1] + ' Gênero: ' + gen)
elif colunas[4] == 'NOR':
noruega += 1
nor_medalhas.append('Esporte: ' + colunas[2] + ' Ano: ' + colunas[0] + ' Cidade: ' + colunas[
1] + ' Gênero: ' + gen)
        maior = 'Suecia'
        num_medalhas = suecia
        if dinamarca > num_medalhas:
            maior = 'Dinamarca'
            num_medalhas = dinamarca
        if noruega > num_medalhas:
            maior = 'Noruega'
            num_medalhas = noruega
print('\nO país com o maior número de medalhas ouro nas modalidades especificadas é a', maior, 'com', num_medalhas, 'medalhas')
print('\nRelatório dos países Suécia, Dinamarca e Noruega referente as medalhas ouro nos esportes Curling, Patinação no gelo, Esqui e Hóquei sobre o gelo no século XXI')
print('\nSuécia:\n')
if sue_medalhas:
for ln in sue_medalhas:
print(ln)
else:
print('Não obteve medalhas de ouro')
print('\nDinamarca:\n')
if den_medalhas:
for ln in den_medalhas:
print(ln)
else:
print('Não obteve medalhas de ouro')
print('\nNoruega:\n')
if nor_medalhas:
for ln in nor_medalhas:
print(ln)
else:
print('Não obteve medalhas de ouro')
|
[
"marcelorv@gmail.com"
] |
marcelorv@gmail.com
|
816b87e9a417a4578c92d360b24184834f8c149f
|
1ee27186cf26b646fb231b6e23a28f00959f3ae2
|
/part1_WebScraping.py
|
a1c31a4f1c1f7d0aa1a0f008f9e8268a41460138
|
[] |
no_license
|
A-Yaghoubian/Web-scraping-in-markets-with-predict
|
714dc5da72dc87354867d305ff330380312f0fef
|
aa0c43595b1f2dc9c6920edeea8e94b8bbb0f0ea
|
refs/heads/main
| 2023-07-06T14:03:41.857711
| 2021-04-07T12:22:44
| 2021-04-07T12:22:44
| 355,533,381
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
import requests
from bs4 import BeautifulSoup
import mysql.connector
print('Zakhire kardan Brand-Name-Price az site DIGISTYLE dakhel database')
print()
print('INFORMATION YOUR DATABASE')
u = input('Please enter your user of database : ')
p = input('Please enter your pass of database : ')
h = input('Please enter your host of database : ')
cnx = mysql.connector.connect(user=u, password=p, host=h, database='DigiStyle')
# print ('connected to db :)')
cursor = cnx.cursor()
newBrand = list()
newName = list()
newPrice = list()
for i in range(1, 63): #WARNING FOR 2 OR 63
r = requests.get('https://www.digistyle.com/category-men-tee-shirts-and-polos/?pageno=%s&sortby=4' %i)
soup = BeautifulSoup(r.text, 'html.parser')
brand = soup.find_all('span', attrs={'class': 'c-product-item__brand'})
name = soup.find_all('span', attrs={'class': 'c-product-item__name'})
price = soup.find_all('span', attrs={'class': 'c-product-item__price-value'})
for i in range(0, 36):
b = brand[i]
b = str(b)
b = b[36:-7]
n = name[i]
n = str(n)
n = n[35:-7]
p = price[i]
p = str(p)
p = p[42:-7]
sql = 'INSERT INTO Digistyle (brand_of_product, name_of_product, price_of_product) VALUES (%s, %s, %s)'
val = (b, n, p)
cursor.execute(sql, val)
cnx.commit()
cnx.close()
|
[
"noreply@github.com"
] |
A-Yaghoubian.noreply@github.com
|
1c43edf94a27aa6141c51da6fce81d5fda5a3967
|
49c137c3df08de22759879b9aaf61318a073b997
|
/vacancy/migrations/0003_auto_20190404_0502.py
|
98f0f4cbaeb1b1003546462fe38e4127899c1030
|
[] |
no_license
|
ayush024/hrms
|
c7da0068a3df57340f82457e20d4d769d15aba4e
|
31c8f85e4ab5730191334561cdbddf386aafa0a7
|
refs/heads/master
| 2020-05-05T13:34:29.954270
| 2019-04-25T18:11:07
| 2019-04-25T18:11:07
| 180,083,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# Generated by Django 2.2 on 2019-04-04 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vacancy', '0002_auto_20190331_1938'),
]
operations = [
migrations.AlterField(
model_name='jobs',
name='fooding',
field=models.BooleanField(default=0),
),
migrations.AlterField(
model_name='jobs',
name='insurance',
field=models.BooleanField(default=0),
),
migrations.AlterField(
model_name='jobs',
name='lodging',
field=models.BooleanField(default=0),
),
]
|
[
"aayushdhakal360@gmail.com"
] |
aayushdhakal360@gmail.com
|
cde113aea88eac4418c8c3aebe85bd0a376b8a61
|
4ac3789c709d1b68a506f183a5b053b1137f02db
|
/src/pilot/transition_probs.py
|
916c70bcd1396194212b0a8b24ca39d5a85b7f26
|
[] |
no_license
|
bdyetton/PSleep
|
5c52d3ddf1ecb5b3caf5fd6abd562007b5a8dc1d
|
9b02cf76f4c63923d1acfbaf32c62fe70ccb42b8
|
refs/heads/master
| 2020-12-01T15:28:23.267808
| 2019-12-29T00:06:18
| 2019-12-29T00:06:18
| 230,681,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,895
|
py
|
from mednickdb_pyapi.mednickdb_pyapi import MednickAPI
from mednickdb_pysleep import defaults
import os
import matplotlib.pyplot as plt
import seaborn as sns
import bootstrapped.bootstrap as bs
import bootstrapped.compare_functions as bs_compare
import bootstrapped.stats_functions as bs_stats
import numpy as np
import pandas as pd
sleep_stages = {
0:'wake',
1:'stage1',
2:'stage2',
3:'sws',
4:'rem'
}
def compare_dists(data, y_var, by_var, y_level=None, by_levels=None, ax=None):
levels = data[by_var].unique()
if by_levels is not None:
levels = [lev for lev in levels if lev in by_levels]
levels_data = []
for lev in levels:
level_data = data.loc[data[by_var] == lev, y_var].dropna()
if y_level is not None:
level_data = level_data.apply(lambda x: x[y_level]).dropna()
levels_data.append(level_data.astype(float).values)
#Runs boostrapped stats test
is_diff = False
if len(levels) == 2:
diff = bs.bootstrap_ab(*levels_data, stat_func=bs_stats.mean, compare_func=bs_compare.percent_change)
is_diff = (diff.lower_bound > 0 or diff.upper_bound < 0)
if is_diff:
sns.set_style("dark")
else:
sns.set_style("white")
diff_msg = 'Difference: \nZero not in CI' if is_diff else 'No Difference: \nZero in CI'
print(diff, '\n', diff_msg)
# Plotting
for lev in levels_data:
sns.distplot(a=lev, ax=ax)
ax.text(0.3, 0.5, diff_msg, transform=ax.transAxes, size=16, color='r' if is_diff else 'k')
plt.title(y_var.split('.')[-1]+' to '+sleep_stages[y_level]+' for the Cleveland Family Study by '+by_var.split('.')[-1])
plt.ylabel('Probability Density')
plt.legend(levels)
def investigate_trans_probs_by_demographics(data, sleep_stages_to_consider=defaults.stages_to_consider):
data = data.drop(['_id', 'sleep_scoring.sourceid', 'visitid', 'datemodified', 'expired'], axis=1)
data['demographics.age_cat'] = (data['demographics.age'] > 55).map({True: 'Older', False: 'Younger'})
data['demographics.ethnicity'] = data['demographics.ethnicity'].map({'white ': 'white', 'black ': 'black'}) #anything else will get nan
demo_cols = ['subjectid', 'demographics.age_cat', 'demographics.ethnicity', 'demographics.sex']
trans_probs_cols = ['sleep_scoring.trans_prob_from_' + s for s in sleep_stages_to_consider]
cols_we_care_about = demo_cols + trans_probs_cols
data = data.loc[:, cols_we_care_about]
data = data.set_index(demo_cols)
from_and_to_data_cont = []
for trans_probs_col in trans_probs_cols:
from_data = data.loc[:, trans_probs_col] # keep index
from_data = from_data.dropna()
        from_and_to_data_np = np.array(from_data.tolist()).astype(float)  # not sure why we need to convert here
from_and_to_data = pd.DataFrame(from_and_to_data_np, columns=sleep_stages_to_consider)
from_and_to_data['from_stage'] = trans_probs_col.split('_')[-1]
from_and_to_data.index = from_data.index
from_and_to_data = from_and_to_data.reset_index()
from_and_to_data = from_and_to_data.melt(id_vars=demo_cols+['from_stage'], value_vars=sleep_stages_to_consider, var_name='to_stage', value_name='prob')
from_and_to_data_cont.append(from_and_to_data)
all_trans_data = pd.concat(from_and_to_data_cont).reset_index(drop=True)
# Plot some data
for by_var in ['demographics.sex', 'demographics.ethnicity', 'demographics.age_cat']:
data_to_plot = all_trans_data.drop(set(demo_cols)-set([by_var]), axis=1).dropna().reset_index(drop=True)
sns.catplot(x='to_stage', y='prob', hue=by_var, row="from_stage",
data=data_to_plot, kind="violin", split=True, height=1.5, aspect=2.5, legend=False)
plt.legend(loc='lower right')
plt.ylim((0, 1))
for to_and_from_stage, data in data_to_plot.groupby(['from_stage', 'to_stage']):
from_stage, to_stage = to_and_from_stage[0], to_and_from_stage[1]
by_data = list(data.groupby(by_var))
diff = bs.bootstrap_ab(by_data[0][1]['prob'].values, by_data[1][1]['prob'].values,
stat_func=bs_stats.mean, compare_func=bs_compare.percent_change)
is_diff = (diff.lower_bound > 0 or diff.upper_bound < 0)
if is_diff:
plt.gcf().axes[sleep_stages_to_consider.index(from_stage)].text(y=0, x=sleep_stages_to_consider.index(to_stage) - 0.1, s='*', color='r', fontsize=18)
plt.show()
if __name__ == '__main__':
med_api = MednickAPI(username=os.environ['mednickapi_username'], password=os.environ['mednickapi_password'])
#Get the data, so easy :)
data = med_api.get_data('studyid=NSRR_CFS', format_as='dataframe_single_index')
print('Got', data.shape[0], 'records')
investigate_trans_probs_by_demographics(data)
|
[
"bdyetton@gmail.com"
] |
bdyetton@gmail.com
|
58e9f0902786c9d6ba075f971c789cd992c620a6
|
9334f5334f2da1283f32b08ef99866202b60ae68
|
/learning_logs/models.py
|
2f576215edee0f56da9ac08ece5ee99ed5365952
|
[] |
no_license
|
ArXaHGeL/Learning-Log
|
e033b9b0471185b7bedaa6e3ad2b367e1e7da64f
|
3b43a173b60b624d9c5615804658151c52127577
|
refs/heads/master
| 2023-02-26T15:21:09.499555
| 2021-02-03T12:09:12
| 2021-02-03T12:09:12
| 335,559,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Topic(models.Model):
"""A topic that the user is learning."""
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
"""Return a string representation of the model."""
return self.text
class Entry(models.Model):
"""Information learned by the user."""
topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
text = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = 'entries'
    def __str__(self):
        """Return a string representation of the model."""
        if len(self.text) <= 50:
            return self.text
        else:
            return self.text[0:50] + "..."
|
[
"zenit_dimka@mail.ru"
] |
zenit_dimka@mail.ru
|
54b0885bfde6ed3aa0813d94f067a252a79a5d94
|
56ce881112d04617795c00b7e6270efc732894e0
|
/adserver/partner/models.py
|
c09c40e2335b7e103b58f3e6540c608e1c4b2af5
|
[] |
no_license
|
kontinuity/papps
|
0fb515d5ee4300e250a03dfbc326b06f5745613c
|
f40315b5106c7f9c24cab3dff3bd1199081dc617
|
refs/heads/master
| 2020-05-03T11:32:07.456114
| 2010-09-03T09:50:40
| 2010-09-03T09:50:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
from django.db import models
from adserver.partner.settings import *
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Partner(models.Model):
company_name = models.CharField(max_length=255)
company_type = models.PositiveIntegerField(choices=COMPANY_TYPE_CHOICES, default=COMPANY_TYPE_DEFAULT)
company_type_other = models.CharField(max_length=255, blank=True, null=True)
number_of_domains = models.PositiveIntegerField(blank=True, null=True)
hosting_control_panel = models.PositiveIntegerField(choices=HOSTING_CONTROL_PANEL_CHOICES, default=HOSTING_CONTROL_PANEL_DEFAULT)
hosting_control_panel_other = models.CharField(max_length=255, blank=True, null=True)
webmail = models.PositiveIntegerField(choices=WEBMAIL_CHOICES, default=WEBMAIL_DEFAULT)
number_of_users = models.PositiveIntegerField(blank=True, null=True)
user = models.OneToOneField(User)
#def create_partner(sender, instance, created, **kwargs):
# if created:
# profile, created = Partner.objects.get_or_create(user=instance)
#
#post_save.connect(create_partner, sender=User)
|
[
"arif.a@directi.com"
] |
arif.a@directi.com
|
30a56aa3ea447d0f6e641cf2b1c120ab673bb144
|
fe81c95988122057f030cc6c57681e215093c9ba
|
/比赛分享/调参/tiaocan1.py
|
3413b37f817c71bf81d552fb4f4f5e1c94ca54e1
|
[] |
no_license
|
xiexiaoyang/Big-Data-Challenge
|
fbe2bbfa92a603460479e6cf7ff4a6f197af239f
|
2fc6ae26037a98d46cb0735a0e4c744b74ec9fb0
|
refs/heads/master
| 2021-07-19T02:37:58.980208
| 2017-10-22T07:18:24
| 2017-10-22T07:18:24
| 107,832,382
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,637
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 19:28:57 2017
@author: Yang
"""
'''Hyper-parameter tuning
1. Understand the model
2. List all of its parameters
3. Tune the parameters that give the model the largest improvement
Errors fixed in the original code:
1. kstep = len(randidx) / nfold changed to kstep = len(randidx) // nfold
2. 'Disbursed' changed to target
3. "Parameter values should be a list." fixed by param_test1 = {'max_depth':list(range(3,10,2)),'min_child_weight':list(range(1,6,2))}
'''
#Import libraries:
import pandas as pd
import numpy as np
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn import cross_validation, metrics #Additional scklearn functions
from sklearn.grid_search import GridSearchCV #Perforing grid search
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4
train = pd.read_csv(r"G:\比赛分享\data\alltrain.csv")
test= pd.read_csv(r"G:\比赛分享\data\alltest.csv")
target = 'label'
IDcol = 'id'
def modelfit(alg, dtrain, predictors,useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
if useTrainCV:
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)
cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
metrics=['auc'], early_stopping_rounds=early_stopping_rounds, show_progress=True)
alg.set_params(n_estimators=cvresult.shape[0])
#Fit the algorithm on the data
alg.fit(dtrain[predictors], dtrain[target],eval_metric='auc')
#Predict training set:
dtrain_predictions = alg.predict(dtrain[predictors])
dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
#Print model report:
print ("\nModel Report")
print (("Accuracy : %.4g") % metrics.accuracy_score(dtrain[target].values, dtrain_predictions))
print (("AUC Score (Train): %f" )% metrics.roc_auc_score(dtrain[target], dtrain_predprob))
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='xgb Feature Importances')
plt.ylabel('Feature Importance Score')
'''
Step 1: Fix the learning rate and the number of estimators used while tuning the
tree-based parameters, i.e. learning_rate and n_estimators (the learning rate
and the number of trees).
'''
##Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
xgb1 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb1, train, predictors)
###Step 2: Tune max_depth and min_child_weight
#param_test1 = {
# 'max_depth':list(range(3,10,2)),
# 'min_child_weight':list(range(1,6,2))
#}
#gsearch1 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=5,
# min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27),
# param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=2 )
#
#print(gsearch1.fit(train[predictors],train[target]))
#print(gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_)
#
#param_test2 = {
# 'max_depth':[4,5,6],
# 'min_child_weight':[4,5,6]
#}
#gsearch2 = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=5,
# min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch2.fit(train[predictors],train[target]))
#print(gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_)
###Step 3: Tune gamma
#param_test3 = {
# 'gamma':[i/10.0 for i in range(0,5)]
#}
#gsearch3 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=4,
# min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch3.fit(train[predictors],train[target]))
#print(gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_)
##
#xgb2 = XGBClassifier(
# learning_rate =0.1,
# n_estimators=1000,
# max_depth=4,
# min_child_weight=6,
# gamma=0,
# subsample=0.8,
# colsample_bytree=0.8,
# objective= 'binary:logistic',
# nthread=4,
# scale_pos_weight=1,
# seed=27)
#modelfit(xgb2, train, predictors)
#
###Step 4: Tune subsample and colsample_bytree
#param_test4 = {
# 'subsample':[i/10.0 for i in range(6,10)],
# 'colsample_bytree':[i/10.0 for i in range(6,10)]
#}
#gsearch4 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
# min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch4.fit(train[predictors],train[target]))
#print(gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_)
#
#param_test5 = {
# 'subsample':[i/100.0 for i in range(75,90,5)],
# 'colsample_bytree':[i/100.0 for i in range(75,90,5)]
#}
#gsearch5 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
# min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#gsearch5.fit(train[predictors],train[target])
###Step 5: Tuning Regularization Parameters
#param_test6 = {
# 'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]
#}
#gsearch6 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
# min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test6, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch6.fit(train[predictors],train[target]))
#print(gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_)
###Step 6: Reducing Learning Rate
#xgb4 = XGBClassifier(
# learning_rate =0.01,
# n_estimators=5000,
# max_depth=4,
# min_child_weight=6,
# gamma=0,
# subsample=0.8,
# colsample_bytree=0.8,
# reg_alpha=0.005,
# objective= 'binary:logistic',
# nthread=4,
# scale_pos_weight=1,
# seed=27)
#modelfit(xgb4, train, predictors)
|
[
"2509039243@qq.com"
] |
2509039243@qq.com
|
6c99d787a87a797b6e5c6afcd4673e6a93bcfa66
|
c1fe9f7093c68d26eed55ceee4769878e8aa6c05
|
/reverse-string.py
|
bc9af688c40e4d38a1949faf89d903cdf39069e6
|
[] |
no_license
|
aadilzbhatti/Small-Challenges
|
781e7b04614d734c176f2d14a61663304316bda5
|
0768974c3c3e5b683e92f7a9cd723dc0456ee55c
|
refs/heads/master
| 2021-11-23T17:24:46.222842
| 2015-02-07T02:21:55
| 2015-02-07T02:21:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# reverse a string
s = ""
text = input("Enter the string to be reversed: ")
for i in range(0, len(text)):
    s += text[len(text) - i - 1]
print(s)
|
[
"aadilzbhatti@gmail.com"
] |
aadilzbhatti@gmail.com
|
e7c14001f2ea7bf7b866830cf28b3ffddb0acf8f
|
2f1c9bba6ba14a46f04a861e70dbf6d50d96535b
|
/Map-CS61aBerkeley/tests/08.py
|
c6f01f55e6158ebdbaf8b695866c2ec2a09fb31a
|
[] |
no_license
|
leovcunha/CS_learning_projects
|
8a3ed5ba76ad81a22c7162835b39734726028953
|
4cbd45192738c2850b308b35ee9b0c95de798748
|
refs/heads/master
| 2021-01-20T01:23:02.738739
| 2019-07-02T02:11:07
| 2019-07-02T02:11:07
| 89,267,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,929
|
py
|
test = {
'name': 'Problem 8',
'points': 2,
'suites': [
{
'cases': [
{
'answer': '18f4b8f373a149983a060187fb945841',
'choices': [
'a list of restaurants reviewed by the user',
'a list of all possible restaurants',
'a list of ratings for restaurants reviewed by the user'
],
'hidden': False,
'locked': True,
'question': 'In best_predictor, what does the variable reviewed represent?'
},
{
'answer': '6e952a03cc93ab2e76cc6e9be1f58c8e',
'choices': [
'a predictor function, and its r_squared value',
'a predictor function',
'an r_squared value',
'a restaurant'
],
'hidden': False,
'locked': True,
'question': r"""
Given a user, a list of restaurants, and a feature function, what
does find_predictor from Problem 7 return?
"""
},
{
'answer': '6290d50f08bc68e242b1124b49a5e8db',
'choices': [
'the predictor with the highest r_squared',
'the predictor with the lowest r_squared',
'the first predictor in the list',
'an arbitrary predictor'
],
'hidden': False,
'locked': True,
'question': r"""
After getting a list of [predictor, r_squared] pairs,
which predictor should we select?
"""
}
],
'scored': False,
'type': 'concept'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('D', [4, 2], [], 2, [
... make_review('D', 3),
... make_review('D', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster] # should be a list of decimals
[2.0, 5.0, 2.0, 5.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster] # Make sure you're iterating through feature_fns!
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('E', [1, 2], [], 4, [
... make_review('E', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns) # Make sure you're only using user-reviewed restaurants!
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0, 2.0]
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> from recommend import *
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('D', [4, 2], [], 2, [
... make_review('D', 3),
... make_review('D', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> # Hint: Price is a perfect predictor of this user's ratings,
>>> # so the predicted ratings should equal the user's ratings
>>> [round(pred(r), 5) for r in cluster] # should be a list of decimals
[2.0, 5.0, 2.0, 5.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster] # Make sure you're iterating through feature_fns!
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('E', [1, 2], [], 4, [
... make_review('E', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns) # Make sure you're only using user-reviewed restaurants!
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0, 2.0]
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> import recommend
>>> test.swap_implementations(recommend)
>>> from recommend import *
""",
'teardown': r"""
>>> test.restore_implementations(recommend)
""",
'type': 'doctest'
}
]
}
|
[
"lvcunha@gmail.com"
] |
lvcunha@gmail.com
|
af4da04242f2f06729d65a60df595b64a56f4355
|
ba2c77f62e7c9ddc074606cbca94062941dfc760
|
/small_methods.py
|
e2a95b7f1d18121fe30f08b1b169cac48fdcb01f
|
[] |
no_license
|
scaars10/Lazy-Crawler
|
5608888f1ed60bdc951b2b4ba2a17ca7ab173bea
|
4088514571f096531076f4c551eac2ce4912530d
|
refs/heads/master
| 2021-08-09T00:25:45.546129
| 2017-11-11T18:29:06
| 2017-11-11T18:29:06
| 110,369,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
import os
from stemming.porter2 import stem
# omnipresent_words = ['www.', 'http:', 'https:', '.com', '.in']
def directory_manage(relative_location): # checks if a directory exists or not if not then it creates it itself
base_path = os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(os.path.join(base_path, relative_location)):
os.makedirs(os.path.join(base_path, relative_location))
f = open('Processing_Data\\keywords.txt', 'r')
keywords = []
values = []
line_count = 0
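# keywords.txt is expected to hold one "<keyword> <integer weight>" pair per line;
# malformed lines are logged to Output\Errors\Keyword_Error.txt below.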
for each_line in f:
line_count += 1
line = each_line.split()
try:
values.append(int(line[1]))
keywords.append(stem(line[0]))
    except:
        directory_manage('Output\\Errors')
        err_f = open('Output\\Errors\\Keyword_Error.txt', 'a')  # separate handle so the keywords file object is not overwritten
        err_f.write('Check Line No. '+str(line_count)+' in Processing_Data\\keywords.txt for formatting error\n')
        err_f.close()
f.close()
def sort_links_wrt_importance(links, links_text):
link_importance = []
iterate = 0
while iterate < len(links):
link = stem(links[iterate])
if isinstance(links_text[iterate], str):
link_text = stem(links_text[iterate])
else:
link_text = 'ignore'
# divided_link = link.split('/')
i = 0
strength = 0
while i < len(keywords):
if keywords[i] in link:
strength += values[i]
if isinstance(link_text, str):
if keywords[i] in link_text:
strength += values[i]
i += 1
link_importance.append(strength)
iterate += 1
i = 0
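    # insertion sort: reorder links in place so higher-importance links come first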
while i < len(links):
j = i
# print('sorting')
while j > 0:
if link_importance[j] > link_importance[j-1]:
temp_link = links[j]
links[j] = links[j-1]
links[j-1] = temp_link
# temp_link_text = links_text[j]
# links_text[j] = links_text[j-1]
# links_text[j-1] = temp_link_text
temp_imp = link_importance[j]
link_importance[j] = link_importance[j-1]
link_importance[j-1] = temp_imp
j -= 1
else:
break
i += 1
return links
|
[
"scaars10@gmail.com"
] |
scaars10@gmail.com
|
b11b5949d9aeb93728df91302c1df74b605ff07c
|
e8dc0309de1dd4d9e4a25bcffdd6f9e9022c153c
|
/Code/wink_detection.py
|
b7c01c51a904d3cc8467f119b3ce1a2f15184b79
|
[] |
no_license
|
FelixFelicis555/Blinking-Keyboard
|
04947fe0b8efacd158d4a698b360233947ee8ef9
|
cd2dd51bfed205780cd46a1f17287015790186d3
|
refs/heads/master
| 2022-02-27T18:35:50.149206
| 2019-11-08T09:52:16
| 2019-11-08T09:52:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,512
|
py
|
import numpy as np
import cv2
import dlib
from scipy.spatial import distance as dist
from gtts import gTTS
import os
language = 'en'
import pyttsx3
engine = pyttsx3.init()
characterdict = {'0':'a','00':'d','000':'j','0000':'n','1':'s','01':'f','001':'k','0001':'y','10':'g','010':'l','0010':'t'}
characterdict['0011']='v'
characterdict['011']='m'
characterdict['11']='h'
characterdict['0100']='b'
characterdict['100']='u'
characterdict['0101']='r'
characterdict['101']='i'
characterdict['0110']='e'
characterdict['110']='o'
characterdict['0111']='c'
characterdict['111']='p'
characterdict['1000']='x'
characterdict['1001']='w'
characterdict['1010']='q'
characterdict['1011']='z'
characterdict['1100']=','
characterdict['1101']='.'
characterdict['1110']='?'
characterdict['1111']=" "
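# Each key encodes the sequence of left ('0') / right ('1') moves used to reach a character
# on the on-screen keyboard (a left wink or the shortest blink appends '0', a right wink or a
# longer blink appends '1'); the value is the character written once that node is selected.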
print("Enter a choice whether you want blink keyboard or wink keyboard \n 1.) Blink Keyboard \n 2.) Wink keyboard")
n = int(input())
if n==2:
while True:
print("You have choosen wink keyboard\n")
print("Way of using wink keyboard\n")
print("1.) You will be shown the keyboard structure in front of you\n")
print("2.) will move the pointer to left side\n")
print("3.) Right wink will move the pointer to right side\n")
print("4.) Blink detected when you here beep sound once will fix your character that you want to choose it\n")
print("5.) When you hear the beep sound twice while blinking you will be back to the starting position \n")
print("6.) On the starting node if you blink that means backspace\n")
print("If you understand the rules press 'y' else 'press 'n' \n")
check = input()
if check =='y':
break
text = ""
PREDICTOR_PATH = "./shape_predictor_68_face_landmarks.dat"
stop_flag = 0
FULL_POINTS = list(range(0, 68))
FACE_POINTS = list(range(17, 68))
JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_INNER_POINTS = list(range(61, 68))
EYE_AR_THRESH = 0.23
EYE_AR_CONSEC_FRAMES = 5
counter_left = 0
total_left = 0
counter_right = 0
total_right = 0
counter_blink = 0
total_blink = 0
flag_left,flag_right,flag_blink = 0,0,0
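# Eye Aspect Ratio (EAR): the mean of the two vertical eyelid distances divided by the
# horizontal eye width. It drops towards zero as the eye closes, so values below
# EYE_AR_THRESH are treated as a closed eye.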
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
video_capture = cv2.VideoCapture(0)
image = "base"
text = ""
while True:
ret, frame = video_capture.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
x = rect.left()
y = rect.top()
x1 = rect.right()
y1 = rect.bottom()
landmarks = np.matrix([[p.x, p.y] for p in predictor(frame, rect).parts()])
left_eye = landmarks[LEFT_EYE_POINTS]
right_eye = landmarks[RIGHT_EYE_POINTS]
left_eye_hull = cv2.convexHull(left_eye)
right_eye_hull = cv2.convexHull(right_eye)
cv2.drawContours(frame, [left_eye_hull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [right_eye_hull], -1, (0, 255, 0), 1)
ear_left = eye_aspect_ratio(left_eye)
ear_right = eye_aspect_ratio(right_eye)
if ear_left >= EYE_AR_THRESH and ear_right >= EYE_AR_THRESH:
counter_blink = 0
counter_left = 0
counter_right = 0
# print("****************************************")
# print("Counter Blink : " , counter_blink)
# print("Counter LEFT : ", counter_left)
# print("Counter Right : ", counter_right)
# print("****************************************")
if counter_blink >= 10:
if counter_blink == 10:
flag_blink = 1
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink == 20:
stop_flag = 1
flag_blink = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_blink == 1:
total_blink += 1
# print("Blink Occured")
counter_blink = 0
flag_blink = 0
if stop_flag == 1:
image = "base"
counter_blink = 0
flag_blink = 0
if ear_left < EYE_AR_THRESH:
if ear_right < EYE_AR_THRESH :
counter_blink += 1
counter_left = 0
else:
counter_blink = 0
counter_left += 1
counter_right = 0
if counter_left == EYE_AR_CONSEC_FRAMES:
flag_left = 1
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_left ==1:
total_left += 1
# print("Left eye winked")
counter_left = 0
counter_blink = 0
flag_left = 0
counter_right = 0
else:
if counter_left >= EYE_AR_CONSEC_FRAMES:
flag_left = 1
if ear_right < EYE_AR_THRESH:
if ear_left < EYE_AR_THRESH:
counter_right = 0
pass
else:
counter_blink = 0
counter_right += 1
counter_left = 0
if counter_right == EYE_AR_CONSEC_FRAMES:
flag_right = 1
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_right == 1:
total_right += 1
# print("Right eye winked")
counter_right = 0
flag_right = 0
counter_blink = 0
counter_left = 0
else:
if counter_right >= EYE_AR_CONSEC_FRAMES:
flag_right = 1
# if ear_left >= EYE_AR_THRESH :
# counter_left = 0
# counter_blink = 0
# if ear_right >= EYE_AR_THRESH:
# counter_right = 0
# counter_blink = 0
cv2.putText(frame, "Wink Left : {}".format(total_left), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
cv2.putText(frame, "Wink Right: {}".format(total_right), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
cv2.putText(frame, "Blink Occured: {}".format(total_blink), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
if total_left == 1:
if image == "base":
image = ""
image+='0'
total_left = 0
total_right = 0
total_blink = 0
flag_left = 0
flag_right = 0
flag_blink = 0
stop_flag = 0
if total_right == 1:
if image =="base":
image = ""
image+='1'
total_right = 0
total_left = 0
total_blink = 0
flag_left = 0
flag_right = 0
flag_blink = 0
stop_flag = 0
if total_blink == 1:
# print("image is "+image+".jpg")
if image!='base':
text += characterdict[image]
else:
if len(text)!=0:
text = text[:len(text)-1]
# do the required action
image = "base"
total_blink = 0
total_left = 0
total_right = 0
flag_left = 0
flag_right = 0
flag_blink = 0
stop_flag = 0
if len(image)>4:
image=image[:4]
cv2.namedWindow("KeyBoard", cv2.WINDOW_NORMAL)
cv2.moveWindow("KeyBoard",850,20)
ia = cv2.imread(image+".jpg")
ims = cv2.resizeWindow("KeyBoard",550, 400) # Resize image
cv2.imshow("KeyBoard" , ia)
cv2.namedWindow("Faces", cv2.WINDOW_NORMAL)
cv2.moveWindow("Faces",0,20)
ims = cv2.resizeWindow("Faces",800, 700) # Resize image
cv2.imshow("Faces", frame)
cv2.namedWindow("Typed_Text", cv2.WINDOW_NORMAL)
cv2.moveWindow("Typed_Text",850,500)
draw = cv2.imread("draw.jpg")
cv2.resizeWindow("Typed_Text",550,270)
cv2.putText(draw, "Typed Text: {}".format(text), (20, 90), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 0), 5)
cv2.imshow("Typed_Text" , draw)
ch = 0xFF & cv2.waitKey(1)
if ch == ord('q'):
break
cv2.destroyAllWindows()
elif n==1:
while True:
print("You have choosen Blink keyboard")
print("Way of using Blink keyboard\n")
print("1.) You will be shown the keyboard structure in front of you\n")
print("2.) Shorter blink: When you hear a beep sound first time, will move the pointer to left side\n")
print("3.) Longer blink: When you hear a beep sound second time, will move the pointer to right side\n")
print("4.) Longest Blink: When you hear a beep sound third time, will fix your character that you want to choose it\n")
print("5.) Back to start: When you hear the beep sound 4th time with writing character\n")
print("6.) On the starting node if you blink that means backspace\n")
print("If you understand the rules press 'y' else 'press 'n' \n")
check = input()
if check =='y':
break
text = ""
PREDICTOR_PATH = "./shape_predictor_68_face_landmarks.dat"
FULL_POINTS = list(range(0, 68))
FACE_POINTS = list(range(17, 68))
JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_INNER_POINTS = list(range(61, 68))
EYE_AR_THRESH = 0.25
EYE_AR_CONSEC_FRAMES = 5
counter_blink = 0
total_blink = 0
    '''
    There are four blink lengths:
    shortest blink --- move the pointer left
    longer blink   --- move the pointer right
    even longer    --- select the letter
    longest blink  --- revert to start
    '''
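    # Example (using characterdict above, an illustration): to type 'f' (code '01'),
    # hold a blink until the first beep (left, '0'), then hold until the second beep
    # (right, '1'), then hold until the third beep to select the character.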
flag_blink_one,flag_blink_two,flag_blink_three,stopflag = 0,0,0,0
count_left,count_right,count_stop = 0,0,0
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
video_capture = cv2.VideoCapture(-1)
image = "base"
text = ""
while True:
ret, frame = video_capture.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
x = rect.left()
y = rect.top()
x1 = rect.right()
y1 = rect.bottom()
landmarks = np.matrix([[p.x, p.y] for p in predictor(frame, rect).parts()])
left_eye = landmarks[LEFT_EYE_POINTS]
right_eye = landmarks[RIGHT_EYE_POINTS]
left_eye_hull = cv2.convexHull(left_eye)
right_eye_hull = cv2.convexHull(right_eye)
cv2.drawContours(frame, [left_eye_hull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [right_eye_hull], -1, (0, 255, 0), 1)
ear_left = eye_aspect_ratio(left_eye)
ear_right = eye_aspect_ratio(right_eye)
# print("****************************************")
# print("Counter Blink : " , counter_blink)
# print("****************************************")
if counter_blink >= 10:
if counter_blink == 10:
flag_blink_one,flag_blink_two,flag_blink_three = 1,0,0
stopflag = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink == 20:
flag_blink_two,flag_blink_one,flag_blink_three = 1,0,0
stopflag = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink == 30:
flag_blink_three,flag_blink_one,flag_blink_two = 1,0,0
stopflag = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink==50:
stopflag = 1
flag_blink_three,flag_blink_one,flag_blink_two = 0,0,0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_blink_three == 1:
total_blink += 1
# print("Stop Blink Occured")
counter_blink = 0
count_stop = 1
flag_blink_one,flag_blink_two,flag_blink_three = 0,0,0
count_left = 0
count_right = 0
elif flag_blink_one == 1:
total_blink += 1
# print("Left side blink occured")
counter_blink = 0
flag_blink_one,flag_blink_two,flag_blink_three = 0,0,0
count_left = 1
count_right = 0
count_stop = 0
elif flag_blink_two == 1:
total_blink += 1
# print("Right side blink occured")
counter_blink = 0
flag_blink_one,flag_blink_two,flag_blink_three = 0,0,0
count_left = 0
count_right = 1
count_stop = 0
elif stopflag == 1:
count_left,count_right,count_stop=0,0,0
stopflag = 0
image = 'base'
if ear_left < EYE_AR_THRESH and ear_right < EYE_AR_THRESH:
counter_blink += 1
else:
counter_blink = 0
cv2.putText(frame, "Blink Occured: {}".format(total_blink), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
if count_left == 1:
if image == "base":
image = ""
image+='0'
count_left = 0
if count_right == 1:
if image =="base":
image = ""
image+='1'
count_right = 0
if count_stop == 1:
if image == "base":
if len(text)!=0:
text = text[:len(text)-1]
# myobj = gTTS(text="backspace", lang=language, slow=False)
# myobj.save("text.mp3")
engine.say("Backspace")
engine.runAndWait()
else:
text += characterdict[image]
# myobj = gTTS(text=characterdict[image], lang=language, slow=False)
# myobj.save("text.mp3")
engine.say(characterdict[image])
engine.runAndWait()
# print("image is "+image+".jpg")
# do the required action
# os.system("mpg321 text.mp3")
image = "base"
count_stop,count_left,count_right = 0,0,0
if len(image)>4:
image=image[:4]
cv2.namedWindow("KeyBoard", cv2.WINDOW_NORMAL)
cv2.moveWindow("KeyBoard",850,20)
ia = cv2.imread(image+".jpg")
ims = cv2.resizeWindow("KeyBoard",550, 400) # Resize image
cv2.imshow("KeyBoard" , ia)
cv2.namedWindow("Faces", cv2.WINDOW_NORMAL)
cv2.moveWindow("Faces",0,20)
ims = cv2.resizeWindow("Faces",800, 700) # Resize image
cv2.imshow("Faces", frame)
cv2.namedWindow("Typed_Text", cv2.WINDOW_NORMAL)
cv2.moveWindow("Typed_Text",850,500)
draw = cv2.imread("draw.jpg")
cv2.resizeWindow("Typed_Text",550,270)
cv2.putText(draw, "Typed Text: {}".format(text), (20, 90), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 0), 5)
cv2.imshow("Typed_Text" , draw)
ch = 0xFF & cv2.waitKey(1)
if ch == ord('q'):
break
cv2.destroyAllWindows()
else:
print("You entered wrong choice ")
exit(0)
|
[
"bhavyabordia@gmail.com"
] |
bhavyabordia@gmail.com
|
d09ca8a14c8cab0258c427bada63637982b2c608
|
ac0844cbd6258ffc1b15cdde7136a07ef28cb8c1
|
/7_2.py
|
3985ebb570a4ff97b10a6dd4aba43c6eb312940e
|
[] |
no_license
|
nikita1998ivanov/Lab-rabota
|
42948394a6cdb4eaffb8b6f5e801225ba7b8ef80
|
0bb6c283465b11218a5ec24de7645bcbe454754f
|
refs/heads/master
| 2022-06-18T03:03:09.949139
| 2020-05-06T08:55:25
| 2020-05-06T08:55:25
| 261,668,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
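# Group the rows of students.csv by the integer in the first ';'-separated column;
# the remaining columns appear to be name, age and date of birth (an assumption).
# The grouped lists are then sorted and printed.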
d = {}
with open("students.csv") as f:
next(f)
for line in f:
h, nm, a, db = line.split(";")
d.setdefault(int(h), []).append((nm, int(a), db))
l = list(d.values())
l.sort()
print(l)
|
[
"noreply@github.com"
] |
nikita1998ivanov.noreply@github.com
|
c22d3551c3f4ba3d14a4cd5bfa8e93641fd47bd6
|
abca8650e1469641fbfd12cc7c1d33eaffc45c4a
|
/lib/db/format_column_names.py
|
065f11055771b623d570eec195d8030504ac2607
|
[
"MIT"
] |
permissive
|
tunityy/Neon8-Bot
|
2549f2b5fad56b25511289c619bead49cf6be90d
|
3cea7a05356ae5dadd2d7adabbf932bebfb849d8
|
refs/heads/main
| 2023-05-01T07:27:40.644937
| 2021-05-22T23:46:06
| 2021-05-22T23:46:06
| 357,702,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,880
|
py
|
from string import capwords
def column_to_text(column_name):
if type(column_name) == str:
return capwords(column_name.replace('_', ' '))
else:
results = [capwords(y.replace('_', ' ')) for y in column_name]
return results
def stat_name_ifs(stat):
st = str(stat).lower()
st = st.replace(' ', '')
st = st.replace('_', '')
# column_name = ['hunger', 'humanity', 'stains', 'current_willpower', 'total_willpower', 'superficial_damage', 'aggravated_damage', 'health']
column_name = ['hunger', 'humanity', 'stains', 'health']
if st in column_name:
return st
else:
# some of these include common or possible typos and misspellings
hung_synonyms = ['currenthunger', 'currenthung', 'hung', 'hun', 'hungry', 'hungerdice', 'hungdice',
'hd', 'bp', 'bloodpool', 'blooddice', 'bd',
'hugn', 'hugner', 'hungre', 'curenthunger', 'curenthung', 'bloop', 'blooppool', 'bloopool']
hum_synonyms = ['hum', 'huemanatee', 'humane', 'human', 'humanty', 'humanit', 'humantiy', 'humanaty']
stains_synonyms = ['stain', 'stian', 'st', 'stians', 'stans']
cwp_synonyms = ['currentwillpower', 'willpower', 'wp', 'currentwp', 'will', 'currentwill', 'currentwp', 'cwill', 'cwp', 'cw', 'willp', 'currentwillp', 'cwillp',
'wilpower', 'curentwillpower', 'current', 'curentwill', 'wil', 'currentwilpower', 'curentwilpower', 'wpwr', 'willpwr', 'wllpwr', 'wlpwr']
twp_synonyms = ['totalwillpower', 'totalwp', 'twp', 'total', 'tot', 'totalwill', 'willpowertotal', 'wptotal', 'willtotal', 'twill', 'tw', 'twillp', 'twillpower',
'totalwilpower', 'totalwil', 'tote', 'totlewillpower', 'totlwillpower', 'totwill', 't', 'totwil', 'totwp', 'to', 'twil']
spr_dmg_synonyms = ['superficialdamage', 'superficial', 'superficialdmg', 'sdmg', 'sdamage', 'sdmg', 'super', 'superdmg',
'supre', 'superficaldamage', 'superficaldmg', 'superfical', 'superfishul', 'superfishuldamage', 'superfishuldmg']
agg_dmg_synonyms = ['aggravateddamage', 'agg', 'aggravated', 'aggr', 'aggdmg', 'aggrdmg', 'aggravateddmg', 'aggra', 'aggdamage', 'admg', 'adamage',
'aggro', 'aggrivated', 'aggrivateddamage', 'aggrivateddmg', 'aggrevated', 'aggrevateddamage', 'aggrevateddmg', 'aggrovated', 'aggrovateddamage', 'aggrovateddmg', 'aggrovateddmg']
        health_synonyms = ['hp', 'hitpoints', 'healthpoints', 'healthbar', 'life', 'heal',
                           'heath', 'healh', 'helth']
if st in hung_synonyms:
return 'hunger'
elif st in hum_synonyms:
return 'humanity'
elif st in stains_synonyms:
return 'stains'
elif st in cwp_synonyms:
return 'current_willpower'
elif st in twp_synonyms:
return 'total_willpower'
elif st in spr_dmg_synonyms:
return 'superficial_damage'
elif st in agg_dmg_synonyms:
return 'aggravated_damage'
elif st in health_synonyms:
return 'health'
else:
return 'Invalid'
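# Illustrative examples: stat_name_ifs('Curent WillPower') -> 'current_willpower',
# stat_name_ifs('blood pool') -> 'hunger', stat_name_ifs('xyz') -> 'Invalid'.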
#### ----------------------------------------------------------
### TODO: when it's just a list of one word it actually just comes out as a string. Need to change it to a list?
def stat_names_listifier(stats, words_and_numbs=False):
"""`words_and_numbs` is to differentiate when stats is just numbers, or contains words and numbers."""
if words_and_numbs == False:
list_stats = ' '.join(stats).split(', ')
if int(len(list_stats)) == 1:
column_name = [stat_name_ifs(list_stats[0])]
return column_name
else:
list_of_columns = [stat_name_ifs(term) for term in list_stats]
if 'Invalid' in list_of_columns:
return 'Invalid'
else:
return list_of_columns
elif words_and_numbs == True:
items_to_assess = ' '.join(stats).split(', ')
list_stats = [item.rsplit(' ', 1)[0] for item in items_to_assess]
values_list = [item.split(' ')[-1] for item in items_to_assess]
for item in values_list:
try:
int(item)
except:
return 'Invalid'
if int(len(list_stats)) == 1:
column_name = [stat_name_ifs(list_stats[0])]
            if 'Invalid' in column_name:
return 'Invalid'
else:
return column_name, values_list
else:
list_of_columns = [stat_name_ifs(term) for term in list_stats]
if 'Invalid' in list_of_columns:
return 'Invalid'
else:
return list_of_columns, values_list
#### ----------------------------------------------------------
|
[
"80991664+tunityy@users.noreply.github.com"
] |
80991664+tunityy@users.noreply.github.com
|
8426f5e2a7f3115533abb324288bc031ba59ff53
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/guestconfiguration/outputs.py
|
b1d2bbd2207b1aaffbc05852618b9e218ea32400
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,362
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'AssignmentInfoResponse',
'AssignmentReportResourceComplianceReasonResponse',
'AssignmentReportResourceResponse',
'AssignmentReportResponse',
'ConfigurationInfoResponse',
'ConfigurationParameterResponse',
'ConfigurationSettingResponse',
'GuestConfigurationAssignmentPropertiesResponse',
'GuestConfigurationNavigationResponse',
'VMInfoResponse',
]
@pulumi.output_type
class AssignmentInfoResponse(dict):
"""
Information about the guest configuration assignment.
"""
def __init__(__self__, *,
name: str,
configuration: Optional['outputs.ConfigurationInfoResponse'] = None):
"""
Information about the guest configuration assignment.
:param str name: Name of the guest configuration assignment.
:param 'ConfigurationInfoResponseArgs' configuration: Information about the configuration.
"""
pulumi.set(__self__, "name", name)
if configuration is not None:
pulumi.set(__self__, "configuration", configuration)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the guest configuration assignment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def configuration(self) -> Optional['outputs.ConfigurationInfoResponse']:
"""
Information about the configuration.
"""
return pulumi.get(self, "configuration")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResourceComplianceReasonResponse(dict):
"""
Reason and code for the compliance of the guest configuration assignment resource.
"""
def __init__(__self__, *,
code: str,
phrase: str):
"""
Reason and code for the compliance of the guest configuration assignment resource.
:param str code: Code for the compliance of the guest configuration assignment resource.
:param str phrase: Reason for the compliance of the guest configuration assignment resource.
"""
pulumi.set(__self__, "code", code)
pulumi.set(__self__, "phrase", phrase)
@property
@pulumi.getter
def code(self) -> str:
"""
Code for the compliance of the guest configuration assignment resource.
"""
return pulumi.get(self, "code")
@property
@pulumi.getter
def phrase(self) -> str:
"""
Reason for the compliance of the guest configuration assignment resource.
"""
return pulumi.get(self, "phrase")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResourceResponse(dict):
"""
The guest configuration assignment resource.
"""
def __init__(__self__, *,
compliance_status: str,
properties: Any,
resource_id: str,
reasons: Optional[Sequence['outputs.AssignmentReportResourceComplianceReasonResponse']] = None):
"""
The guest configuration assignment resource.
:param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
:param Any properties: Properties of a guest configuration assignment resource.
:param str resource_id: Name of the guest configuration assignment resource setting.
:param Sequence['AssignmentReportResourceComplianceReasonResponseArgs'] reasons: Compliance reason and reason code for a resource.
"""
pulumi.set(__self__, "compliance_status", compliance_status)
pulumi.set(__self__, "properties", properties)
pulumi.set(__self__, "resource_id", resource_id)
if reasons is not None:
pulumi.set(__self__, "reasons", reasons)
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> str:
"""
A value indicating compliance status of the machine for the assigned guest configuration.
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Properties of a guest configuration assignment resource.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> str:
"""
Name of the guest configuration assignment resource setting.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter
def reasons(self) -> Optional[Sequence['outputs.AssignmentReportResourceComplianceReasonResponse']]:
"""
Compliance reason and reason code for a resource.
"""
return pulumi.get(self, "reasons")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResponse(dict):
def __init__(__self__, *,
compliance_status: str,
end_time: str,
id: str,
operation_type: str,
report_id: str,
start_time: str,
assignment: Optional['outputs.AssignmentInfoResponse'] = None,
resources: Optional[Sequence['outputs.AssignmentReportResourceResponse']] = None,
vm: Optional['outputs.VMInfoResponse'] = None):
"""
:param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
:param str end_time: End date and time of the guest configuration assignment compliance status check.
:param str id: ARM resource id of the report for the guest configuration assignment.
:param str operation_type: Type of report, Consistency or Initial
:param str report_id: GUID that identifies the guest configuration assignment report under a subscription, resource group.
:param str start_time: Start date and time of the guest configuration assignment compliance status check.
:param 'AssignmentInfoResponseArgs' assignment: Configuration details of the guest configuration assignment.
:param Sequence['AssignmentReportResourceResponseArgs'] resources: The list of resources for which guest configuration assignment compliance is checked.
:param 'VMInfoResponseArgs' vm: Information about the VM.
"""
pulumi.set(__self__, "compliance_status", compliance_status)
pulumi.set(__self__, "end_time", end_time)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "operation_type", operation_type)
pulumi.set(__self__, "report_id", report_id)
pulumi.set(__self__, "start_time", start_time)
if assignment is not None:
pulumi.set(__self__, "assignment", assignment)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if vm is not None:
pulumi.set(__self__, "vm", vm)
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> str:
"""
A value indicating compliance status of the machine for the assigned guest configuration.
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter(name="endTime")
def end_time(self) -> str:
"""
End date and time of the guest configuration assignment compliance status check.
"""
return pulumi.get(self, "end_time")
@property
@pulumi.getter
def id(self) -> str:
"""
ARM resource id of the report for the guest configuration assignment.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="operationType")
def operation_type(self) -> str:
"""
Type of report, Consistency or Initial
"""
return pulumi.get(self, "operation_type")
@property
@pulumi.getter(name="reportId")
def report_id(self) -> str:
"""
GUID that identifies the guest configuration assignment report under a subscription, resource group.
"""
return pulumi.get(self, "report_id")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> str:
"""
Start date and time of the guest configuration assignment compliance status check.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def assignment(self) -> Optional['outputs.AssignmentInfoResponse']:
"""
Configuration details of the guest configuration assignment.
"""
return pulumi.get(self, "assignment")
@property
@pulumi.getter
def resources(self) -> Optional[Sequence['outputs.AssignmentReportResourceResponse']]:
"""
The list of resources for which guest configuration assignment compliance is checked.
"""
return pulumi.get(self, "resources")
@property
@pulumi.getter
def vm(self) -> Optional['outputs.VMInfoResponse']:
"""
Information about the VM.
"""
return pulumi.get(self, "vm")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationInfoResponse(dict):
"""
Information about the configuration.
"""
def __init__(__self__, *,
name: str,
version: str):
"""
Information about the configuration.
:param str name: Name of the configuration.
:param str version: Version of the configuration.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def version(self) -> str:
"""
Version of the configuration.
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationParameterResponse(dict):
"""
Represents a configuration parameter.
"""
def __init__(__self__, *,
name: Optional[str] = None,
value: Optional[str] = None):
"""
Represents a configuration parameter.
:param str name: Name of the configuration parameter.
:param str value: Value of the configuration parameter.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the configuration parameter.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
Value of the configuration parameter.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationSettingResponse(dict):
"""
Configuration setting of LCM (Local Configuration Manager).
"""
def __init__(__self__, *,
action_after_reboot: Optional[str] = None,
allow_module_overwrite: Optional[str] = None,
configuration_mode: Optional[str] = None,
configuration_mode_frequency_mins: Optional[float] = None,
reboot_if_needed: Optional[str] = None,
refresh_frequency_mins: Optional[float] = None):
"""
Configuration setting of LCM (Local Configuration Manager).
:param str action_after_reboot: Specifies what happens after a reboot during the application of a configuration. The possible values are ContinueConfiguration and StopConfiguration
:param str allow_module_overwrite: If true - new configurations downloaded from the pull service are allowed to overwrite the old ones on the target node. Otherwise, false
:param str configuration_mode: Specifies how the LCM(Local Configuration Manager) actually applies the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and ApplyAndAutoCorrect.
:param float configuration_mode_frequency_mins: How often, in minutes, the current configuration is checked and applied. This property is ignored if the ConfigurationMode property is set to ApplyOnly. The default value is 15.
:param str reboot_if_needed: Set this to true to automatically reboot the node after a configuration that requires reboot is applied. Otherwise, you will have to manually reboot the node for any configuration that requires it. The default value is false. To use this setting when a reboot condition is enacted by something other than DSC (such as Windows Installer), combine this setting with the xPendingReboot module.
:param float refresh_frequency_mins: The time interval, in minutes, at which the LCM checks a pull service to get updated configurations. This value is ignored if the LCM is not configured in pull mode. The default value is 30.
"""
if action_after_reboot is not None:
pulumi.set(__self__, "action_after_reboot", action_after_reboot)
if allow_module_overwrite is not None:
pulumi.set(__self__, "allow_module_overwrite", allow_module_overwrite)
if configuration_mode is not None:
pulumi.set(__self__, "configuration_mode", configuration_mode)
if configuration_mode_frequency_mins is None:
configuration_mode_frequency_mins = 15
if configuration_mode_frequency_mins is not None:
pulumi.set(__self__, "configuration_mode_frequency_mins", configuration_mode_frequency_mins)
if reboot_if_needed is None:
reboot_if_needed = 'False'
if reboot_if_needed is not None:
pulumi.set(__self__, "reboot_if_needed", reboot_if_needed)
if refresh_frequency_mins is None:
refresh_frequency_mins = 30
if refresh_frequency_mins is not None:
pulumi.set(__self__, "refresh_frequency_mins", refresh_frequency_mins)
@property
@pulumi.getter(name="actionAfterReboot")
def action_after_reboot(self) -> Optional[str]:
"""
Specifies what happens after a reboot during the application of a configuration. The possible values are ContinueConfiguration and StopConfiguration
"""
return pulumi.get(self, "action_after_reboot")
@property
@pulumi.getter(name="allowModuleOverwrite")
def allow_module_overwrite(self) -> Optional[str]:
"""
If true - new configurations downloaded from the pull service are allowed to overwrite the old ones on the target node. Otherwise, false
"""
return pulumi.get(self, "allow_module_overwrite")
@property
@pulumi.getter(name="configurationMode")
def configuration_mode(self) -> Optional[str]:
"""
Specifies how the LCM(Local Configuration Manager) actually applies the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and ApplyAndAutoCorrect.
"""
return pulumi.get(self, "configuration_mode")
@property
@pulumi.getter(name="configurationModeFrequencyMins")
def configuration_mode_frequency_mins(self) -> Optional[float]:
"""
How often, in minutes, the current configuration is checked and applied. This property is ignored if the ConfigurationMode property is set to ApplyOnly. The default value is 15.
"""
return pulumi.get(self, "configuration_mode_frequency_mins")
@property
@pulumi.getter(name="rebootIfNeeded")
def reboot_if_needed(self) -> Optional[str]:
"""
Set this to true to automatically reboot the node after a configuration that requires reboot is applied. Otherwise, you will have to manually reboot the node for any configuration that requires it. The default value is false. To use this setting when a reboot condition is enacted by something other than DSC (such as Windows Installer), combine this setting with the xPendingReboot module.
"""
return pulumi.get(self, "reboot_if_needed")
@property
@pulumi.getter(name="refreshFrequencyMins")
def refresh_frequency_mins(self) -> Optional[float]:
"""
The time interval, in minutes, at which the LCM checks a pull service to get updated configurations. This value is ignored if the LCM is not configured in pull mode. The default value is 30.
"""
return pulumi.get(self, "refresh_frequency_mins")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GuestConfigurationAssignmentPropertiesResponse(dict):
"""
Guest configuration assignment properties.
"""
def __init__(__self__, *,
assignment_hash: str,
compliance_status: str,
last_compliance_status_checked: str,
latest_report_id: str,
provisioning_state: str,
target_resource_id: str,
context: Optional[str] = None,
guest_configuration: Optional['outputs.GuestConfigurationNavigationResponse'] = None,
latest_assignment_report: Optional['outputs.AssignmentReportResponse'] = None):
"""
Guest configuration assignment properties.
:param str assignment_hash: Combined hash of the configuration package and parameters.
:param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
:param str last_compliance_status_checked: Date and time when last compliance status was checked.
:param str latest_report_id: Id of the latest report for the guest configuration assignment.
:param str provisioning_state: The provisioning state, which only appears in the response.
:param str target_resource_id: VM resource Id.
:param str context: The source which initiated the guest configuration assignment. Ex: Azure Policy
:param 'GuestConfigurationNavigationResponseArgs' guest_configuration: The guest configuration to assign.
:param 'AssignmentReportResponseArgs' latest_assignment_report: Last reported guest configuration assignment report.
"""
pulumi.set(__self__, "assignment_hash", assignment_hash)
pulumi.set(__self__, "compliance_status", compliance_status)
pulumi.set(__self__, "last_compliance_status_checked", last_compliance_status_checked)
pulumi.set(__self__, "latest_report_id", latest_report_id)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "target_resource_id", target_resource_id)
if context is not None:
pulumi.set(__self__, "context", context)
if guest_configuration is not None:
pulumi.set(__self__, "guest_configuration", guest_configuration)
if latest_assignment_report is not None:
pulumi.set(__self__, "latest_assignment_report", latest_assignment_report)
@property
@pulumi.getter(name="assignmentHash")
def assignment_hash(self) -> str:
"""
Combined hash of the configuration package and parameters.
"""
return pulumi.get(self, "assignment_hash")
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> str:
"""
A value indicating compliance status of the machine for the assigned guest configuration.
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter(name="lastComplianceStatusChecked")
def last_compliance_status_checked(self) -> str:
"""
Date and time when last compliance status was checked.
"""
return pulumi.get(self, "last_compliance_status_checked")
@property
@pulumi.getter(name="latestReportId")
def latest_report_id(self) -> str:
"""
Id of the latest report for the guest configuration assignment.
"""
return pulumi.get(self, "latest_report_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> str:
"""
VM resource Id.
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter
def context(self) -> Optional[str]:
"""
The source which initiated the guest configuration assignment. Ex: Azure Policy
"""
return pulumi.get(self, "context")
@property
@pulumi.getter(name="guestConfiguration")
def guest_configuration(self) -> Optional['outputs.GuestConfigurationNavigationResponse']:
"""
The guest configuration to assign.
"""
return pulumi.get(self, "guest_configuration")
@property
@pulumi.getter(name="latestAssignmentReport")
def latest_assignment_report(self) -> Optional['outputs.AssignmentReportResponse']:
"""
Last reported guest configuration assignment report.
"""
return pulumi.get(self, "latest_assignment_report")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GuestConfigurationNavigationResponse(dict):
"""
Guest configuration is an artifact that encapsulates DSC configuration and its dependencies. The artifact is a zip file containing DSC configuration (as MOF) and dependent resources and other dependencies like modules.
"""
def __init__(__self__, *,
content_hash: str,
content_uri: str,
configuration_parameter: Optional[Sequence['outputs.ConfigurationParameterResponse']] = None,
configuration_setting: Optional['outputs.ConfigurationSettingResponse'] = None,
kind: Optional[str] = None,
name: Optional[str] = None,
version: Optional[str] = None):
"""
Guest configuration is an artifact that encapsulates DSC configuration and its dependencies. The artifact is a zip file containing DSC configuration (as MOF) and dependent resources and other dependencies like modules.
:param str content_hash: Combined hash of the guest configuration package and configuration parameters.
:param str content_uri: Uri of the storage where guest configuration package is uploaded.
:param Sequence['ConfigurationParameterResponseArgs'] configuration_parameter: The configuration parameters for the guest configuration.
:param 'ConfigurationSettingResponseArgs' configuration_setting: The configuration setting for the guest configuration.
:param str kind: Kind of the guest configuration. For example:DSC
:param str name: Name of the guest configuration.
:param str version: Version of the guest configuration.
"""
pulumi.set(__self__, "content_hash", content_hash)
pulumi.set(__self__, "content_uri", content_uri)
if configuration_parameter is not None:
pulumi.set(__self__, "configuration_parameter", configuration_parameter)
if configuration_setting is not None:
pulumi.set(__self__, "configuration_setting", configuration_setting)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="contentHash")
def content_hash(self) -> str:
"""
Combined hash of the guest configuration package and configuration parameters.
"""
return pulumi.get(self, "content_hash")
@property
@pulumi.getter(name="contentUri")
def content_uri(self) -> str:
"""
Uri of the storage where guest configuration package is uploaded.
"""
return pulumi.get(self, "content_uri")
@property
@pulumi.getter(name="configurationParameter")
def configuration_parameter(self) -> Optional[Sequence['outputs.ConfigurationParameterResponse']]:
"""
The configuration parameters for the guest configuration.
"""
return pulumi.get(self, "configuration_parameter")
@property
@pulumi.getter(name="configurationSetting")
def configuration_setting(self) -> Optional['outputs.ConfigurationSettingResponse']:
"""
The configuration setting for the guest configuration.
"""
return pulumi.get(self, "configuration_setting")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of the guest configuration. For example:DSC
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the guest configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
Version of the guest configuration.
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMInfoResponse(dict):
"""
Information about the VM.
"""
def __init__(__self__, *,
id: str,
uuid: str):
"""
Information about the VM.
:param str id: Azure resource Id of the VM.
:param str uuid: UUID(Universally Unique Identifier) of the VM.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "uuid", uuid)
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id of the VM.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def uuid(self) -> str:
"""
UUID(Universally Unique Identifier) of the VM.
"""
return pulumi.get(self, "uuid")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
4608f8b477a4827ab546757c5cdf0cf175bfa969
|
841e32970a080c8beb4ccc94d2afc5f264483a45
|
/api/app/migrations/0001_initial.py
|
d227e2d9a19eee4bb8de17faada02127cabe7b35
|
[] |
no_license
|
semprajapat/automation_pytest
|
3249fec117186ee9984674585b79fe0d75a15a6c
|
05fb58c5cece1043317bf444e8636fd49564fccc
|
refs/heads/master
| 2021-01-14T19:18:53.865204
| 2020-02-24T12:22:16
| 2020-02-24T12:22:16
| 242,727,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
# Generated by Django 3.0.3 on 2020-02-24 11:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Datamodel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('last', models.CharField(max_length=50)),
],
),
]
|
[
"aaa@Aaas-MacBook-Pro.local"
] |
aaa@Aaas-MacBook-Pro.local
|
6ff0d59f4790561ec2ce92b3a868755af76b678b
|
3225f11370c581f95e4a5d123ab03eb7de53c6b3
|
/Face-Recognition/face_reco_video.py
|
f174fa3c4a616303402acdefed3781ca94f2554f
|
[] |
no_license
|
jaseem61/python_practice
|
5684ae2f3925c54b2d34666f2531cb99acc96609
|
7bd8ca1b72acb5e78e2c0a451ef8e339417fcff7
|
refs/heads/master
| 2022-04-22T04:15:15.140748
| 2020-04-18T07:10:04
| 2020-04-18T07:10:04
| 231,757,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
import face_recognition
import os
import cv2
import keyboard
known_faces_dir="known_faces"
count=1
tolerance=0.6
frame_thickness=3
font_thickness=2
model="hog"
print("loading known faces")
known_faces=[]
known_names=[]
for name in os.listdir(known_faces_dir):
for filename in os.listdir(f"{known_faces_dir}/{name}"):
image=face_recognition.load_image_file(f"{known_faces_dir}/{name}/{filename}")
encoding=face_recognition.face_encodings(image)
if encoding:
            known_faces.append(encoding[0])  # keep a single 128-d encoding; compare_faces expects one per entry
known_names.append(name)
print("processing unknown faces")
cap=cv2.VideoCapture(0)
while True:
ret,image=cap.read()
locations=face_recognition.face_locations(image,model=model)
encodings=face_recognition.face_encodings(image,locations)
for face_encoding, face_location in zip(encodings,locations):
count=count+1
results=face_recognition.compare_faces(known_faces,face_encoding,tolerance=0.3)
match=None
        if True in results:  # at least one known face is within tolerance
            print(count)
            match = known_names[results.index(True)]  # name of the first matching known face
print(f"Match found:{match}")
top_left=(face_location[3],face_location[0])
bottom_right=(face_location[1],face_location[2])
color=[0,255,0]
cv2.rectangle(image,top_left,bottom_right,color,frame_thickness)
top_left=(face_location[3],face_location[2])
bottom_right=(face_location[1],face_location[2]+22)
cv2.rectangle(image,top_left,bottom_right,color,cv2.FILLED)
cv2.putText(image,match,(face_location[3]+10,face_location[2]+15),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),font_thickness)
    cv2.imshow("Video", image)  # fixed window name; "filename" was a leftover from the loading loop
cv2.waitKey(1)
if(keyboard.is_pressed('q')):
break
| ["noreply@github.com"] | jaseem61.noreply@github.com |
cc132717c790921bdf671f8150bb2f9d868ad2fe | 6ee3daaec8559287b6e9f07ae93e49ab0cbd3a89 | /Edabit/Edabit_6306022610113_ch7.py | b823274d734c905f545a8666458f89e5e1f6a666 | [] | no_license | 6306022610113/INE_Problem | 9a43c42d71faa4ed7c4da8b83a53bad0a67ac7a2 | 99f3215aafecc486d81fb2d26aeb962d90970768 | refs/heads/main | 2023-04-21T11:03:07.471197 | 2021-05-06T04:56:07 | 2021-05-06T04:56:07 | 328,900,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py |
def shared_letters(a, b):  # take parameters a and b
    lst = []  # list to collect shared letters
    for i in a.lower():  # for each lowercase character of a
        if i in b.lower():  # if it also occurs in b (lowercased)
            lst.append(i)  # keep it
    return ''.join(sorted(set(lst)))  # return the unique shared letters, sorted

print(shared_letters("house", "home"))
print(shared_letters("Micky", "mouse"))
print(shared_letters("house", "villa"))
| ["68582327+6306022610113@users.noreply.github.com"] | 68582327+6306022610113@users.noreply.github.com |
49b63a0524f032834d51833a9cee91640d52b635 | 06933e4550c4d647ecedab639c1fa9748d7aa155 | /tvshows/tvshows_app/models.py | 8cdbce4772b4f7f1552184f5fac82bb38761ab97 | [] | no_license | leoalicastro/tv_shows | ce4bb052c64ed6aba34194104f6f31462c1a61c5 | 890122a4a7eda81cb24f10f88cc217c132e7850b | refs/heads/main | 2023-07-16T15:00:03.292785 | 2021-08-19T18:50:09 | 2021-08-19T18:50:09 | 390,782,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py |
from django.db import models
from datetime import datetime


class ShowManager(models.Manager):
    def basic_validator(self, post_data):
        errors = {}
        if len(post_data['title']) < 2:
            errors['title'] = "Title must be at least 2 characters"
        if len(post_data['network']) < 3:
            errors['network'] = "Network must be at least 3 characters"
        if post_data['desc'] != '' and len(post_data['desc']) < 10:
            errors['desc'] = "Description must be at least 10 characters"
        if datetime.strptime(post_data['release'], '%Y-%m-%d') > datetime.now():
            errors['release'] = 'Release Date should be in the past'
        return errors


class Show(models.Model):
    title = models.CharField(max_length=255)
    network = models.CharField(max_length=255)
    release = models.DateField()
    desc = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = ShowManager()
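# --- Hedged editorial sketch (not part of the original file) ---
# Minimal example of how a view could call ShowManager.basic_validator.
# The view function and redirect targets are hypothetical; only the manager
# API and Show fields defined above are taken from the file itself.
from django.shortcuts import redirect

def create_show(request):
    errors = Show.objects.basic_validator(request.POST)
    if errors:
        # in a real app these messages would usually go through django.contrib.messages
        return redirect('/shows/new')
    Show.objects.create(
        title=request.POST['title'],
        network=request.POST['network'],
        release=request.POST['release'],
        desc=request.POST['desc'],
    )
    return redirect('/shows')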
| ["leoalicastro957@gmail.com"] | leoalicastro957@gmail.com |
b4e76b67a52d7e11e271463c76c756cd39c39301 | f09978f2a0850278255bd198222cd3990cb0c687 | /gear/schema.py | 9e678012c38374b5baee61fdf28ff22143a7874c | [] | no_license | szpone/climbing-gear | 0e4e53b99a0b550c0e172af21c2c9e08e2c3f1ba | 78ab13b97b4b66464859b95ba6e5ed8587d5e60c | refs/heads/master | 2022-12-12T11:08:57.277056 | 2019-06-05T16:06:02 | 2019-06-05T16:06:02 | 185,016,538 | 1 | 0 | null | 2022-11-22T03:49:28 | 2019-05-05T10:30:11 | Python | UTF-8 | Python | false | false | 514 | py |
import graphene
from graphene_django.types import DjangoObjectType, ObjectType

from .models import Gear


class GearType(DjangoObjectType):
    class Meta:
        model = Gear


class Query(ObjectType):
    gear = graphene.Field(GearType, id=graphene.Int())
    gears = graphene.List(GearType)

    def resolve_gear(self, info, id=None):
        # the resolver argument must match the field argument declared above ("id")
        return Gear.objects.filter(id=id).first()

    def resolve_gears(self, info, **kwargs):
        return Gear.objects.all()


schema = graphene.Schema(query=Query)
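# --- Hedged editorial sketch (not part of the original file) ---
# Minimal example of executing the schema above with graphene's in-process
# executor; the query string simply mirrors the "gears" field declared in Query.
def run_example_query():
    result = schema.execute('{ gears { id } }')
    if result.errors:
        print(result.errors)
    else:
        print(result.data)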
| ["nikola.adamus@gmail.com"] | nikola.adamus@gmail.com |
4eee374d40da98978fa6eead0dbd109ebd17f59e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2449/60657/249828.py | b6f2e07860c983d2311d854da47037a89843a79d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 152 | py |
A = input().split(',')
B = input()

def judge(A, B):
    # return the index of B in A, or -1 if B is not present
    if B in A:
        return A.index(B)
    else:
        return -1

print(judge(A, B))
| ["1069583789@qq.com"] | 1069583789@qq.com |
0e9776198e43fe8ba697233bc1a7c1c9c3496279 | bef71d057048b93ef784892d911e7c2f7ffaee14 | /framework_autotest/testsuites/test_wirenetwork.py | 91fb3dbad2201ce112d8c4fcf0e8e7290c8c7e12 | [] | no_license | leaf2maple/python-selenium-unittest | 2379403e98508000276bb5d0c89866efc5450d90 | 50bf514144c6cd6e8d0f51164731bbad367e9356 | refs/heads/master | 2020-09-01T00:26:30.307410 | 2019-10-31T17:52:09 | 2019-10-31T17:52:09 | 218,826,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,572 | py |
import unittest
import time
from framework.browser_engine import BrowserEngine
from pageobject.wb01_wirenetworkpage import WireNetWorkPage
from framework.login import Login
from pageobject.wb01_homepage import HomePage
class TestWireNetWork(unittest.TestCase):
@classmethod
def setUpClass(cls):
browser = BrowserEngine(cls)
cls.driver = browser.open_browser(cls)
login = Login(cls.driver)
login.skip_or_login()
homepage = HomePage(cls.driver)
homepage.wireNetwork_click()
time.sleep(5)
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def test_011_wirenetwork_switch(self):
wirenetwork = WireNetWorkPage(self.driver)
        # switch off
wirenetwork.wirenetwork_switch_click()
time.sleep(1)
try:
el = self.driver.find_element_by_xpath("//div[@class='wireNetwork']")
assert "IP设置" not in el.text
print("test_011 pass")
wirenetwork.get_screenshot_as_file()
except Exception as e:
print("test_011 fail", format(e))
        # switch on
wirenetwork.wirenetwork_switch_click()
time.sleep(1)
try:
el = self.driver.find_element_by_xpath("//span[@class='title' and text()='IP设置']")
assert "IP设置" in el.text
print("test_011 pass")
wirenetwork.get_screenshot_as_file()
except Exception as e:
print("test_011 fail", format(e))
def test_012_ip_manual_set(self):
wirenetwork = WireNetWorkPage(self.driver)
wirenetwork.ip_ul_click()
wirenetwork.ip_manual_set_click()
try:
el = "//span[@class='address-input-title' and text()='IP地址']/following-sibling::span[1]"
assert self.driver.find_element_by_xpath(el) is True
print("test_012 pass")
wirenetwork.get_screenshot_as_file()
except Exception as e:
print("test_012 fail", format(e))
def test_013_ip_auto_set(self):
wirenetwork = WireNetWorkPage(self.driver)
wirenetwork.ip_ul_click()
wirenetwork.ip_auto_set_click()
try:
el = "//span[@class='address-input-title' and text()='IP地址']/following-sibling::span[1]"
assert self.driver.find_element_by_xpath(el) is True
print("test_013 pass")
wirenetwork.get_screenshot_as_file()
except Exception as e:
print("test_013 fail", format(e))
if __name__ == '__main__':
unittest.main()
| ["421757223@qq.com"] | 421757223@qq.com |
a5db37c7dc9f8509adffc6dc45b2b5386d2c55a7 | f3a3228c1afa0e252fa041553e450b3b53e273ec | /zetcode/tetris.py | 88a6ac7bbcdf3616296a1f89d250cfc2f9c15cbf | [] | no_license | dugbang/pyqt_prj | 9395d25202d43fcadc577c9ff8606f649a575c9a | ed4fae66496e57258cdb22360273462a1ac59ea0 | refs/heads/master | 2020-04-13T07:04:41.980757 | 2019-01-29T00:41:17 | 2019-01-29T00:41:17 | 163,039,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,455 | py |
"""
ZetCode PyQt5 tutorial
This is a Tetris game clone.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
"""
from PyQt5.QtWidgets import QMainWindow, QFrame, QDesktopWidget, QApplication
from PyQt5.QtCore import Qt, QBasicTimer, pyqtSignal
from PyQt5.QtGui import QPainter, QColor
import sys, random
class Tetris(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
'''initiates application UI'''
self.tboard = Board(self)
self.setCentralWidget(self.tboard)
self.statusbar = self.statusBar()
self.tboard.msg2Statusbar[str].connect(self.statusbar.showMessage)
self.tboard.start()
self.resize(180, 380)
self.center()
self.setWindowTitle('Tetris')
self.show()
def center(self):
'''centers the window on the screen'''
screen = QDesktopWidget().screenGeometry()
# screen = QDesktopWidget().availableGeometry()
size = self.geometry()
        # QWidget.move expects integer coordinates, so use integer division
        self.move((screen.width() - size.width()) // 2,
                  (screen.height() - size.height()) // 2)
def center_(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
class Board(QFrame):
msg2Statusbar = pyqtSignal(str)
BoardWidth = 10
BoardHeight = 22
Speed = 300
def __init__(self, parent):
super().__init__(parent)
self.initBoard()
def initBoard(self):
'''initiates board'''
self.timer = QBasicTimer()
self.isWaitingAfterLine = False
self.curX = 0
self.curY = 0
self.numLinesRemoved = 0
self.board = []
self.setFocusPolicy(Qt.StrongFocus)
self.isStarted = False
self.isPaused = False
self.clearBoard()
def shapeAt(self, x, y):
'''determines shape at the board position'''
return self.board[(y * Board.BoardWidth) + x]
def setShapeAt(self, x, y, shape):
'''sets a shape at the board'''
self.board[(y * Board.BoardWidth) + x] = shape
def squareWidth(self):
'''returns the width of one square'''
return self.contentsRect().width() // Board.BoardWidth
def squareHeight(self):
'''returns the height of one square'''
return self.contentsRect().height() // Board.BoardHeight
def start(self):
'''starts game'''
if self.isPaused:
return
self.isStarted = True
self.isWaitingAfterLine = False
self.numLinesRemoved = 0
self.clearBoard()
self.msg2Statusbar.emit(str(self.numLinesRemoved))
self.newPiece()
self.timer.start(Board.Speed, self)
def pause(self):
'''pauses game'''
if not self.isStarted:
return
self.isPaused = not self.isPaused
if self.isPaused:
self.timer.stop()
self.msg2Statusbar.emit("paused")
else:
self.timer.start(Board.Speed, self)
self.msg2Statusbar.emit(str(self.numLinesRemoved))
self.update()
def paintEvent(self, event):
'''paints all shapes of the game'''
painter = QPainter(self)
rect = self.contentsRect()
boardTop = rect.bottom() - Board.BoardHeight * self.squareHeight()
for i in range(Board.BoardHeight):
for j in range(Board.BoardWidth):
shape = self.shapeAt(j, Board.BoardHeight - i - 1)
if shape != Tetrominoe.NoShape:
self.drawSquare(painter,
rect.left() + j * self.squareWidth(),
boardTop + i * self.squareHeight(), shape)
if self.curPiece.shape() != Tetrominoe.NoShape:
for i in range(4):
x = self.curX + self.curPiece.x(i)
y = self.curY - self.curPiece.y(i)
self.drawSquare(painter, rect.left() + x * self.squareWidth(),
boardTop + (Board.BoardHeight - y - 1) * self.squareHeight(),
self.curPiece.shape())
def keyPressEvent(self, event):
'''processes key press events'''
if not self.isStarted or self.curPiece.shape() == Tetrominoe.NoShape:
super(Board, self).keyPressEvent(event)
return
key = event.key()
if key == Qt.Key_P:
self.pause()
return
if self.isPaused:
return
elif key == Qt.Key_Left:
self.tryMove(self.curPiece, self.curX - 1, self.curY)
elif key == Qt.Key_Right:
self.tryMove(self.curPiece, self.curX + 1, self.curY)
elif key == Qt.Key_Down:
self.tryMove(self.curPiece.rotateRight(), self.curX, self.curY)
elif key == Qt.Key_Up:
self.tryMove(self.curPiece.rotateLeft(), self.curX, self.curY)
elif key == Qt.Key_Space:
self.dropDown()
elif key == Qt.Key_D:
self.oneLineDown()
else:
super(Board, self).keyPressEvent(event)
def timerEvent(self, event):
'''handles timer event'''
if event.timerId() == self.timer.timerId():
if self.isWaitingAfterLine:
self.isWaitingAfterLine = False
self.newPiece()
else:
self.oneLineDown()
else:
super(Board, self).timerEvent(event)
def clearBoard(self):
'''clears shapes from the board'''
for i in range(Board.BoardHeight * Board.BoardWidth):
self.board.append(Tetrominoe.NoShape)
def dropDown(self):
'''drops down a shape'''
newY = self.curY
while newY > 0:
if not self.tryMove(self.curPiece, self.curX, newY - 1):
break
newY -= 1
self.pieceDropped()
def oneLineDown(self):
'''goes one line down with a shape'''
if not self.tryMove(self.curPiece, self.curX, self.curY - 1):
self.pieceDropped()
def pieceDropped(self):
'''after dropping shape, remove full lines and create new shape'''
for i in range(4):
x = self.curX + self.curPiece.x(i)
y = self.curY - self.curPiece.y(i)
self.setShapeAt(x, y, self.curPiece.shape())
self.removeFullLines()
if not self.isWaitingAfterLine:
self.newPiece()
def removeFullLines(self):
'''removes all full lines from the board'''
numFullLines = 0
rowsToRemove = []
for i in range(Board.BoardHeight):
n = 0
for j in range(Board.BoardWidth):
if not self.shapeAt(j, i) == Tetrominoe.NoShape:
n = n + 1
if n == 10:
rowsToRemove.append(i)
rowsToRemove.reverse()
for m in rowsToRemove:
for k in range(m, Board.BoardHeight):
for l in range(Board.BoardWidth):
self.setShapeAt(l, k, self.shapeAt(l, k + 1))
numFullLines = numFullLines + len(rowsToRemove)
if numFullLines > 0:
self.numLinesRemoved = self.numLinesRemoved + numFullLines
self.msg2Statusbar.emit(str(self.numLinesRemoved))
self.isWaitingAfterLine = True
self.curPiece.setShape(Tetrominoe.NoShape)
self.update()
def newPiece(self):
'''creates a new shape'''
self.curPiece = Shape()
self.curPiece.setRandomShape()
self.curX = Board.BoardWidth // 2 + 1
self.curY = Board.BoardHeight - 1 + self.curPiece.minY()
if not self.tryMove(self.curPiece, self.curX, self.curY):
self.curPiece.setShape(Tetrominoe.NoShape)
self.timer.stop()
self.isStarted = False
self.msg2Statusbar.emit("Game over")
def tryMove(self, newPiece, newX, newY):
'''tries to move a shape'''
for i in range(4):
x = newX + newPiece.x(i)
y = newY - newPiece.y(i)
if x < 0 or x >= Board.BoardWidth or y < 0 or y >= Board.BoardHeight:
return False
if self.shapeAt(x, y) != Tetrominoe.NoShape:
return False
self.curPiece = newPiece
self.curX = newX
self.curY = newY
self.update()
return True
def drawSquare(self, painter, x, y, shape):
'''draws a square of a shape'''
colorTable = [0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00]
color = QColor(colorTable[shape])
painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,
self.squareHeight() - 2, color)
painter.setPen(color.lighter())
painter.drawLine(x, y + self.squareHeight() - 1, x, y)
painter.drawLine(x, y, x + self.squareWidth() - 1, y)
painter.setPen(color.darker())
painter.drawLine(x + 1, y + self.squareHeight() - 1,
x + self.squareWidth() - 1, y + self.squareHeight() - 1)
painter.drawLine(x + self.squareWidth() - 1,
y + self.squareHeight() - 1, x + self.squareWidth() - 1, y + 1)
class Tetrominoe(object):
NoShape = 0
ZShape = 1
SShape = 2
LineShape = 3
TShape = 4
SquareShape = 5
LShape = 6
MirroredLShape = 7
class Shape(object):
coordsTable = (
((0, 0), (0, 0), (0, 0), (0, 0)),
((0, -1), (0, 0), (-1, 0), (-1, 1)),
((0, -1), (0, 0), (1, 0), (1, 1)),
((0, -1), (0, 0), (0, 1), (0, 2)),
((-1, 0), (0, 0), (1, 0), (0, 1)),
((0, 0), (1, 0), (0, 1), (1, 1)),
((-1, -1), (0, -1), (0, 0), (0, 1)),
((1, -1), (0, -1), (0, 0), (0, 1))
)
def __init__(self):
self.coords = [[0, 0] for i in range(4)]
self.pieceShape = Tetrominoe.NoShape
self.setShape(Tetrominoe.NoShape)
def shape(self):
'''returns shape'''
return self.pieceShape
def setShape(self, shape):
'''sets a shape'''
table = Shape.coordsTable[shape]
for i in range(4):
for j in range(2):
self.coords[i][j] = table[i][j]
self.pieceShape = shape
def setRandomShape(self):
'''chooses a random shape'''
self.setShape(random.randint(1, 7))
def x(self, index):
'''returns x coordinate'''
return self.coords[index][0]
def y(self, index):
'''returns y coordinate'''
return self.coords[index][1]
def setX(self, index, x):
'''sets x coordinate'''
self.coords[index][0] = x
def setY(self, index, y):
'''sets y coordinate'''
self.coords[index][1] = y
def minX(self):
'''returns min x value'''
m = self.coords[0][0]
for i in range(4):
m = min(m, self.coords[i][0])
return m
def maxX(self):
'''returns max x value'''
m = self.coords[0][0]
for i in range(4):
m = max(m, self.coords[i][0])
return m
def minY(self):
'''returns min y value'''
m = self.coords[0][1]
for i in range(4):
m = min(m, self.coords[i][1])
return m
def maxY(self):
'''returns max y value'''
m = self.coords[0][1]
for i in range(4):
m = max(m, self.coords[i][1])
return m
def rotateLeft(self):
'''rotates shape to the left'''
if self.pieceShape == Tetrominoe.SquareShape:
return self
result = Shape()
result.pieceShape = self.pieceShape
for i in range(4):
result.setX(i, self.y(i))
result.setY(i, -self.x(i))
return result
def rotateRight(self):
'''rotates shape to the right'''
if self.pieceShape == Tetrominoe.SquareShape:
return self
result = Shape()
result.pieceShape = self.pieceShape
for i in range(4):
result.setX(i, -self.y(i))
result.setY(i, self.x(i))
return result
if __name__ == '__main__':
app = QApplication([])
tetris = Tetris()
sys.exit(app.exec_())
| ["dugbang@gmail.com"] | dugbang@gmail.com |
f4d9d13be2187390413090795c88670b3fbc20fd | 36644ad31dc42a91cae5200559d3f591b90c3d83 | /server3/test.py | d6f20f35763db67f220d6b7a0d4cfaca6d000768 | ["MIT"] | permissive | kenken64/docker-microservices | 77070f6a782407e52d45bd5a546efc8758c000a0 | 1923f935de40afda3af5529fadc0b3747b3b3e56 | refs/heads/master | 2023-01-22T17:25:44.566427 | 2019-11-22T08:05:44 | 2019-11-22T08:05:44 | 222,846,340 | 1 | 4 | MIT | 2023-01-07T11:56:43 | 2019-11-20T03:52:25 | TypeScript | UTF-8 | Python | false | false | 287 | py |
from pymongo import MongoClient
import os
from bson import json_util
client = MongoClient('mongodb://localhost:27017')
print(client)
db = client['testdb']
collection = db.movie.find()
print(collection)
y = list(collection)
print(y)
#x = json_util.dumps({'data': collection })
#print(x)
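# --- Hedged editorial sketch (not part of the original script) ---
# The two commented-out lines above suggest serialising the documents with
# bson.json_util; a minimal working version of that idea, reusing the client
# and database already opened above:
docs = list(db.movie.find())
as_json = json_util.dumps({'data': docs})
print(as_json)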
| ["bunnyppl@gmail.com"] | bunnyppl@gmail.com |
5c5a54f0963e8d6bd055050c7770fbf455661208 | 12a62bbca8065dcb6d835144368f6ad4cf46f219 | /random_proj.py | e15f439ab7201a601e168512de11ffb755e3dcbf | [] | no_license | daemonmaker/biglittle | c2371b198a43273275144036e3971c8035efd588 | feadb55aa68f5b54f52084e0a12368783c93dd78 | refs/heads/master | 2021-01-11T11:03:25.318130 | 2014-11-25T23:56:42 | 2014-11-25T23:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,591 | py |
#! /usr/bin/env python
import time
from datetime import datetime
import numpy as np
import sys
import os
import os.path as op
import cPickle as pkl
from itertools import product
import gc
import theano
from theano import function
from theano import tensor as T
from theano import config
from theano import shared
from theano.tensor.shared_randomstreams import RandomStreams
from utils import *
from experiments import Experiments
from layer import HiddenLayer, HiddenBlockLayer, HiddenRandomBlockLayer
from timing_stats import TimingStats as TS
from models import (
EqualParametersModel,
EqualComputationsModel,
SparseBlockModel,
all_same
)
def simple_train(
model,
train_model,
test_model,
validate_model,
learning_rate,
shared_learning_rate,
timing_stats,
n_epochs=1000
):
timing_stats.add(['epoch', 'train'])
epoch = 0
minibatch_avg_cost_accum = 0
while(epoch < n_epochs):
print "Epoch %d" % epoch
timing_stats.start('epoch')
for minibatch_index in xrange(model.data.n_train_batches):
if minibatch_index % 10 == 0:
print '... minibatch_index: %d/%d\r' \
% (minibatch_index, model.data.n_train_batches),
# Note the magic comma on the previous line prevents new lines
timing_stats.start('train')
minibatch_avg_cost = train_model(minibatch_index)
timing_stats.end('train')
minibatch_avg_cost_accum += minibatch_avg_cost[0]
print '... minibatch_avg_cost_accum: %f' \
% (minibatch_avg_cost_accum/float(model.data.n_train_batches))
timing_stats.end('epoch')
epoch += 1
def train(
model,
train_model,
test_model,
validate_model,
learning_rate,
shared_learning_rate,
timing_stats,
n_epochs=1000
):
def summarize_rates():
print "Learning rate: ", learning_rate.rate
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 100 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(data.n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_params = None
this_validation_loss = 0
best_validation_loss = np.inf
best_iter = 0
test_score = 0.
accum = 0
epoch = 0
done_looping = False
timing_stats.add(['train', 'epoch', 'valid'])
summarize_rates()
timing_stats.start()
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
timing_stats.start('epoch')
for minibatch_index in xrange(data.n_train_batches):
timing_stats.start('train')
minibatch_avg_cost = train_model(minibatch_index)
timing_stats.end('train')
#print "0: ", model.layers[-5].in_idxs.get_value()
#print "1: ", model.layers[-4].in_idxs.get_value()
#print "2: ", model.layers[-3].in_idxs.get_value()
#print "3: ", model.layers[-2].in_idxs.get_value()
#print "4: ", model.layers[-1].in_idxs.get_value()
minibatch_avg_cost = minibatch_avg_cost[0]
accum = accum + minibatch_avg_cost
# print (
# "minibatch_avg_cost: " + str(minibatch_avg_cost)
# + " minibatch_avg_cost: " + str(minibatch_avg_cost)
# )
# print (
# l_layers[0].W.get_value().sum()
# + ' ' + l_layers[1].W.get_value().sum()
# + ' '
# + layers[0].W.get_value().sum()
# + ' ' + layers[1].W.get_value().sum()
# )
# print (
# "A: " + np.max(np.abs(layers[0].W.get_value()))
# + ' ' + np.max(np.abs(layers[0].b.get_value()))
# + ' ' + np.max(np.abs(layers[1].W.get_value()))
# + ' ' + np.max(np.abs(layers[1].b.get_value()))
# )
# print (
# "B: " + np.abs(layers[0].W.get_value()).sum()
# + ' ' + np.abs(layers[0].b.get_value()).sum()
# + ' ' + np.abs(layers[1].W.get_value()).sum()
# + ' ' + np.abs(layers[1].b.get_value()).sum()
# )
# print (
# "C: " + np.abs(np.array(minibatch_avg_cost[1])).sum()
# + ' ' + np.abs(np.array(minibatch_avg_cost[2])).sum()
# + ' ' + np.abs(np.array(minibatch_avg_cost[3])).sum()
# + ' ' + np.abs(np.array(minibatch_avg_cost[4])).sum()
# )
# iteration number
iter = (epoch - 1) * data.n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
timing_stats.end('epoch')
timing_stats.reset('epoch')
timing_stats.reset('train')
accum = accum / validation_frequency
summary = ("minibatch_avg_cost: %f, time: %f"
% (accum, timing_stats.accumed['train'][-1][1]))
accum = 0
print "%s" % (summary)
# compute zero-one loss on validation set
summary = (
'epoch %i, minibatch %i/%i'
% (
epoch, minibatch_index + 1, data.n_train_batches
)
)
validation_losses = [validate_model(i) for i
in xrange(data.n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
#this_validation_loss = 0
summary = ('validation error %f %% '
% (this_validation_loss * 100.))
print ("%s" % (summary))
# if we got the best validation score until now
this_validation_loss = this_validation_loss
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(data.n_test_batches)]
test_score = np.mean(test_losses)
#test_score = 0
summary = 'test_score: %f' % (test_score * 100.)
print (' epoch %i, minibatch %i/%i,'
' test error of best model %s'
% (epoch, minibatch_index + 1,
data.n_train_batches, summary))
learning_rate.update()
shared_learning_rate.set_value(learning_rate.rate)
summarize_rates()
if patience <= iter:
done_looping = True
break
timing_stats.end()
print('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %s' % timing_stats)
def run_experiments(exps, models, rng=None):
if rng is None:
rng = np.random.RandomState()
data = None
model = None
timings = None
for idx, model_class in product(exps, models):
print 'Experiment: %d, Model class: %s' % (idx, model_class)
parameters = exps.get_parameters_by_exp_idx(idx)
print 'Batch size: %d' % parameters['batch_size']
if (
data is None
or data.batch_size != parameters['batch_size']
or data.reshape_data != model_class.reshape_data
):
print 'Loading Data'
print '... MNIST'
data = MNIST(parameters['batch_size'], model_class.reshape_data)
gc.collect()
try:
shared_learning_rate = shared(
np.array(
parameters['learning_rate'].rate,
dtype=config.floatX
),
name='learning_rate'
)
timings = TS(['build_model', 'build_functions', 'full_train'])
print 'Building model: %s' % str(model_class)
timings.start('build_model')
layer_definitions = exps.get_layers_definition(idx)
model = model_class(
data=data,
layer_descriptions=layer_definitions,
batch_size=parameters['batch_size'],
learning_rate=shared_learning_rate,
L1_reg=parameters['L1_reg'],
L2_reg=parameters['L2_reg'],
)
print '... time: %f' % timings.end('build_model')
print 'Building functions'
timings.start('build_functions')
functions = model.build_functions()
print '... time: %f' % timings.end('build_functions')
print 'Training'
timings.start('full_train')
simple_train(
model,
learning_rate=parameters['learning_rate'],
shared_learning_rate=shared_learning_rate,
n_epochs=parameters['n_epochs'],
timing_stats=timings,
**functions
)
print 'Training time: %d' % timings.end('full_train')
model = None
except MemoryError:
epoch_time = -1
if timings is not None:
print 'Timings: %s' % timings
exps.save(idx, model_class.__name__, 'timings', timings)
timings = None
gc.collect()
pkl.dump(exps, open('random_proj_experiments.pkl', 'wb'))
def plot_times_by_batch(database):
import matplotlib.pyplot as plt
# Load the database
exps = pkl.load(open(database, 'rb'))
# Find experiments that have results
exp_idxs = exps.get_idxs('experiments', has_results=True)
# Plot results for each experiment grouped by the layers_description
layers_description_idxs = exps.get_table_idxs_by_exp_idxs(
'layers_description',
exp_idxs
)
for layers_description_idx in layers_description_idxs:
result_idxs = exps.get_result_idxs_by_table_idx(
'layers_description',
layers_description_idx
)
batch_sizes = [exps.get_parameters_by_exp_idx(idx)['batch_size']
for idx in result_idxs]
timings = {model_name: np.zeros(len(batch_sizes))
for model_name in exps.results[result_idxs[0]].keys()}
for i, idx in enumerate(result_idxs):
for model_name, stats in exps.results[idx].iteritems():
timings[model_name][i] = stats[
'timings'
].mean_difference('train')/batch_sizes[i]
for model_name, timings in timings.iteritems():
plt.plot(batch_sizes, timings, marker='o', label=model_name,)
plt.title('Train time per sample', fontsize=12)
layers_description = exps.get_layers_description(
layers_description_idx
)
plt.suptitle(
'layers_description_idx: %d, n_units: %s, n_hids: %s,\n'
'k_pers: %s, all same: %r' % (
layers_description_idx,
layers_description['n_hids'],
layers_description['n_units_per'],
layers_description['k_pers'],
'index_selection_funcs' in layers_description.keys()
),
y=0.99,
fontsize=10
)
plt.xlabel('Batch Size')
plt.ylabel('Time (s)')
plt.legend()
plt.xticks(batch_sizes)
figs_dir = 'figs'
if not op.exists(figs_dir):
os.mkdir(figs_dir)
plt.savefig(
op.join(
figs_dir,
'layers_description_%d.png' % layers_description_idx
),
format='png'
)
plt.show()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Run random_proj experiments and plot results'
)
parser.add_argument(
'-m', '--use_layers',
type=int, default=[], nargs='+',
help='Identifier for which models to use in the experiments.'
)
parser.add_argument(
'-c', '--layer_class',
default='HiddenRandomBlockLayer',
help='The type of layer to use in the block sparse model.'
)
parser.add_argument(
'-b', '--batch_sizes',
type=int, default=[32], nargs='+',
help='Range of batch sizes to test.'
)
parser.add_argument(
'-n', '--number_of_epochs',
type=int, default=1,
help='Number of epochs to execute for each experiment.'
)
parser.add_argument(
'-u', '--units_per_block',
type=int, default=32,
help='Number of units per block in the sparse block models.'
)
parser.add_argument(
'-d', '--database',
default='random_proj_experiments.pkl',
help='Which database to use.'
)
parser.add_argument(
'-l', '--load_database',
default=False, action='store_true',
help='Whether to load an existing database.'
)
parser.add_argument(
'-p', '--plot',
default=False,
action='store_true',
help='Plot results instaed of execute experiments.'
)
args = parser.parse_args()
if args.plot:
plot_times_by_batch(args.database)
else:
if args.load_database:
exps = pkl.load(open(args.database))
else:
## Determine the type of sparsity layer to use
if args.layer_class == 'HiddenRandomBlockLayer':
layer_class = HiddenRandomBlockLayer
else:
layer_class = HiddenBlockLayer
## Create experiments
exps = Experiments(
input_dim=784, # data.train_set_x.shape[-1].eval(),
num_classes=10
)
# Add descriptions of models
exps.add_layers_description(
0,
{
'n_hids': (25,),
'n_units_per': args.units_per_block,
'k_pers': (1, 1),
'activations': (T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
1,
{
'n_hids': (25, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.5, 1),
'activations': (T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
2,
{
'n_hids': (25, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.25, 0.25, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
3,
{
'n_hids': (25, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1., 0.25, 0.25, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, None
)
}
)
exps.add_layers_description(
4,
{
'n_hids': (50, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.2, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
},
)
exps.add_layers_description(
5,
{
'n_hids': (50, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, None
)
},
)
exps.add_layers_description(
6,
{
'n_hids': (25, 100, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1, 1),
'activations': (T.tanh, T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
7,
{
'n_hids': (25, 100, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1, 1),
'activations': (T.tanh, T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same, None
)
}
)
exps.add_layers_description(
8,
{
'n_hids': (50, 200, 500, 200, 50),
'n_units_per': args.units_per_block,
'k_pers': (1., 0.1, 0.02, 0.02, 0.1, 1),
'activations': (
None, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
9,
{
'n_hids': (50, 75, 200, 75, 50),
'n_units_per': args.units_per_block,
'k_pers': (1., 0.1, 0.05, 0.05, 0.1, 1),
'activations': (
T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same,
all_same, None
)
}
)
exps.add_layers_description(
10,
{
'n_hids': (50, 500, 500, 500, 500, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.07, 0.03, 0.02, 0.01, 0.15, 1),
'activations': (
T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
},
)
exps.add_layers_description(
11,
{
'n_hids': (50, 500, 500, 500, 500, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.07, 0.03, 0.02, 0.01, 0.15, 1),
'activations': (
T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same,
all_same, all_same, None
)
}
)
exps.add_layers_description(
12,
{
'n_hids': (50, 100, 500, 500, 500, 500, 500, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.1, 0.05, 0.01, 0.01, 0.01, 0.01, 0.05, 0.1, 1),
'activations': (
None, T.tanh, T.tanh, T.tanh, T.tanh,
T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
},
)
exps.add_layers_description(
13,
{
'n_hids': (50, 100, 500, 500, 500, 500, 500, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.1, 0.05, 0.01, 0.01, 0.01, 0.1, 0.5, 0.1, 1),
'activations': (
None, T.tanh, T.tanh, T.tanh, T.tanh,
T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same,
all_same, all_same, None
)
}
)
exps.add_layers_description(
14,
{
'n_hids': (50, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
layer_class,
layer_class,
layer_class,
layer_class,
],
},
)
# Add parameter combinations
for idx, batch_size in enumerate(args.batch_sizes):
exps.add_parameters(
idx,
{
'n_epochs': args.number_of_epochs,
'batch_size': batch_size,
'learning_rate': LinearChangeRate(
0.21, -0.01, 0.2, 'learning_rate'
),
'L1_reg': 0.0,
'L2_reg': 0.0001
}
)
if len(args.use_layers) > 0:
print 'Executing experiments for layers %s' % args.use_layers
exps.create_experiments(args.use_layers)
else:
exps.create_experiments()
run_experiments(
exps,
models=[
EqualParametersModel,
EqualComputationsModel,
SparseBlockModel
]
)
| ["daemonmaker@gmail.com"] | daemonmaker@gmail.com |
64923cbcfed9624b6b8c664e8deaf3ad28ade468 | a756e26160502b49dea686baa4f8d8480895ab85 | /PartB_LBC_CorrespondingStates.py | 128522479dc943e764d6b24b93be59a28c826581 | [] | no_license | Aitous/ENE251 | adeb715ad24094765e23d03a481e309ab2dd3f8c | e7770c469f63683c4c3ea7916d8bcad64ad16593 | refs/heads/master | 2022-12-04T18:01:57.908783 | 2020-08-19T21:16:54 | 2020-08-19T21:16:54 | 288,349,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,765 | py |
#!/usr/bin/env python
# coding: utf-8
# In[38]:
import time
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.linear_model import LinearRegression
from scipy import interpolate
# Solving RachfordRice
def SolveRachfordRice(l, Nc, z, K):
F = lambda l: sum([(1 - K[i]) * z[i]/(K[i] + (1 - K[i]) * l) for i in range(Nc)])
dF = lambda l: sum([-z[i] * (1 - K[i])**2/((K[i] + (1 - K[i]) * l)**2) for i in range(Nc)])
F0 = F(0)
F1 = F(1)
if(F0 > 0 and F1 < 0):
lmin = 0
lmax = 1
elif(F0 > 0 and F1 > 0):
lmin = 1
lmax = np.max([(K[i]*z[i] - K[i])/(1 - K[i]) for i in range(Nc)])
else:
lmax = 0
lmin = np.min([(z[i] - K[i])/(1 - K[i]) for i in range(Nc)])
useNewton = True #Change to false for bisection only
error = [] #error array
i = 0
tol = 1.e-5
while abs(F(l)) > tol:
if(F(l) > 0):
lmin = l
else:
lmax = l
delta_l = - F(l) / dF(l)
if(l + delta_l > lmin and l + delta_l < lmax and useNewton):
l = l + delta_l
else:
l = 0.5 * (lmin + lmax)
error.append(F(l))
#print('error = ', error[i]) #reporting error for each step
i += 1
return l
#Calculating the a's and b's of the vapor and liquid phases. The function kij loads the interaction coefficients based on the EOS of interest
def kij(EOS):
    # binary interaction coefficients for the EOS of interest
    if EOS == 'PR':
        return np.zeros((19, 19))
    elif EOS == 'SRK':
        return np.array([[0, 0.1, 0.1257, 0.0942],
                         [0.1, 0, 0.027, 0.042],
                         [0.1257, 0.027, 0, 0.008],
                         [0.0942, 0.042, 0.008, 0]])
    elif EOS == 'RK':
        return np.zeros([3, 3])
def calc_a(EOS, T, Tc, Pc, omega):
'''calculates ai for each component for the EOS of interest
EOS: Equation of state (PR, SRK, or RK)
T, Tc: temperature and critical temperature of the component
Pc: critical pressure of the component
omega: accentric factor for the component'''
R = 8.314
    if EOS == 'PR':
        fw = 0.37464 + 1.54226*omega - 0.26992*omega**2
        a1 = np.divide(0.45724*R**2*Tc**2 , Pc)
        a2 = (1 + np.multiply(fw, (1 - np.sqrt(np.divide(T, Tc)))))**2
        a = np.multiply(a1, a2)
    elif EOS == 'SRK':
        fw = 0.48 + 1.574*omega - 0.176*omega**2
        a1 = np.divide((0.42748*R**2*Tc**2), Pc)
        a2 = (1 + np.multiply(fw, (1 - np.sqrt(np.divide(T, Tc)))))**2
        a = np.multiply(a1, a2)
    elif EOS == 'RK':
        a = np.divide(0.42748*R**2*Tc**(5/2), (Pc*T**0.5))
    else:
        print('parameters for this EOS are not defined')
return a
def calc_b(EOS, Tc, Pc):
'''calculates bi for each component for the EOS of interest
EOS: Equation of state (PR, SRK, or RK)
Tc: critical temperature of the component
Pc: critical pressure of the component
'''
R = 8.314 # gas constant
# The below if statement computes b for each
# componenet based on the EOS of
# interest (Table 5.1 in the course reader)
    if EOS == 'PR':
        b = np.divide(0.07780*R*Tc, Pc)
    elif EOS == 'SRK':
        b = np.divide(0.08664*R*Tc, Pc)
    elif EOS == 'RK':
        b = np.divide(0.08664*R*Tc, Pc)
return b
def find_am(EOS, y, T, Tc, Pc, omega):
''' calculates the a parameter for the EOS of interest
EOS: equation of state of interest (PR, SRK, RK)
y: vapor or liquid compositions
T, Tc: temperature value and critical temperature array
Pc: critical pressure array
omega: accentric factors array '''
kijs = kij(EOS)
am = np.sum(y[i]*y[j]*np.sqrt(calc_a(EOS, T, Tc[i], Pc[i], omega[i]) *calc_a(EOS, T, Tc[j], Pc[j], omega[j]))*(1-kijs[i,j]) for i in range(len(y)) for j in range(len(y)))
return am
def find_bm(EOS, y, Tc, Pc):
'''This function computes the b for the mixture for the EOS of interest
EOS: Equation of state (PR, SRK, or RK)
y: liquid or vapor compositions array
Tc and Pc: critical temperature and pressure array
'''
bm = np.sum(np.multiply(y, calc_b(EOS, Tc, Pc)))
return bm
def Z_factor(EOS, P, T, a, b):
'''This function computes the Z factor for the cubic EOS of interest
EOS: equation of state (PR, SRK, or RK)
P, T: pressure and temperature
a, b: the vapor or liquid parameters of equation of state
'''
R = 8.314 # gas constant
if EOS == 'PR':
u = 2
w = -1
elif EOS == 'SRK':
u = 1
w = 0
elif EOS == 'RK':
u = 1
w = 0
A = np.divide(a*P, R**2*T**2)
B = np.divide(b*P, R*T)
Coeffs = list()
Coeffs.append(1)
Coeffs.append(-(1 + B - u*B))
Coeffs.append(A + w*B**2 - u*B - u*B**2)
Coeffs.append(-np.multiply(A, B) - w*B**2 - w*B**3)
Z = np.roots(Coeffs)
# remove the roots with imaginary parts
Z = np.real(Z[np.imag(Z) == 0])
Zv = max(Z)
Zl = min(Z)
return Zv, Zl
def get_fug(EOS, y, Z, Tc, Pc, P, T, omega, a, b):
'''This function computes the liquid or vapor fugacity of all components
using Eq. 6.8 in course reader
parameters needed:
EOS: equation of state (PR, SRK, or RK)
y: liquid or vapor compositions
Z: z-factors for vapor or liquid
Tc and Pc: critical temperature and pressure for all individual comp.s
P, T: pressure and temperature of the system
omega: accentric factors for all individual components
a and b: EOS parameters as computed in another function
'''
R = 8.314 # gas constant
    if EOS == 'PR':
        u = 2
        w = -1
        kijs = kij(EOS)
    elif EOS == 'SRK':
        u = 1
        w = 0
        kijs = kij(EOS)
    elif EOS == 'RK':
        u = 1
        w = 0
        kijs = kij(EOS)
fug = np.zeros(y.shape)
A = np.divide(a*P, R**2*T**2)
B = np.divide(b*P, R*T)
delta_i = list()
a_i = list()
for i in range(len(y)):
a_i.append(calc_a(EOS, T, Tc[i], Pc[i], omega[i]))
for i in range(len(y)):
xa = 0
for j in range(len(y)):
xa += y[j] * math.sqrt(a_i[j]) * (1 - kijs[i][j])
delta_i.append(2 * math.sqrt(a_i[i]) / a * xa)
for i in range(len(fug)):
bi = calc_b(EOS, Tc, Pc)[i]
ln_Phi = bi/b * (Z - 1) - math.log(Z - B) + A / (B * math.sqrt(u**2 - 4*w)) * (bi/b - delta_i[i]) * math.log((2 * Z + B *(u + math.sqrt(u**2 - 4*w))) /(2 * Z + B *(u - math.sqrt(u**2 - 4*w))))
fug[i] = y[i] * P * math.exp(ln_Phi)
return fug
def Ki_guess(Pc, Tc, P, T, omega, Nc):
Ki = np.array([Pc[i]/P * np.exp(5.37 * (1 + omega[i]) * (1 - Tc[i]/T)) for i in range(Nc)])
return Ki
def flash(EOS, l, Nc, zi, Tc, Pc, P, T, omega):
Ki = Ki_guess(Pc, Tc, P, T, omega, Nc)
tol = 1e-5
R = 8.314 # gas constant
l = SolveRachfordRice(l, Nc, zi, Ki)
xi = np.divide(zi, l+(1-l)*Ki)
yi = np.divide(np.multiply(Ki, zi), (l+(1-l)*Ki))
av = find_am(EOS,yi,T,Tc,Pc,omega)
al = find_am(EOS,xi,T,Tc,Pc,omega)
bv = find_bm(EOS,yi,Tc,Pc)
bl = find_bm(EOS,xi,Tc,Pc)
#Z and fugacity determination for the vapor phase based on minimising Gibbs Free Energy
Zv = Z_factor(EOS,P,T,av,bv) #containing the max and min roots
fugV_v = get_fug(EOS, yi, Zv[0], Tc, Pc, P, T, omega, av, bv)
fugV_l = get_fug(EOS, yi, Zv[1], Tc, Pc, P, T, omega, av, bv)
deltaGV = np.sum(yi * np.log(fugV_l / fugV_v))
if deltaGV <= 0:
Zv = Zv[1]
fug_v = fugV_l
else:
Zv = Zv[0]
fug_v = fugV_v
#Z and fugacity determination for the liquid phase based on minimising Gibbs Free Energy
Zl = Z_factor(EOS,P,T,al,bl) #containing the max and min roots
fugL_v = get_fug(EOS, xi, Zl[0], Tc, Pc, P, T, omega, al, bl)
fugL_l = get_fug(EOS, xi, Zl[1], Tc, Pc, P, T, omega, al, bl)
deltaGL = np.sum(xi * np.log(fugL_l / fugL_v))
if deltaGL <= 0:
Zl = Zl[1]
fug_l = fugL_l
else:
Zl = Zl[0]
fug_l = fugL_v
while np.max(abs(np.divide(fug_v, fug_l) - 1)) > tol:
Ki = Ki * np.divide(fug_l, fug_v)
l = SolveRachfordRice(l, Nc, zi, Ki)
xi = np.divide(zi, l+(1-l)*Ki)
yi = np.divide(np.multiply(Ki, zi), (l+(1-l)*Ki))
av = find_am(EOS,yi,T,Tc,Pc,omega)
al = find_am(EOS,xi,T,Tc,Pc,omega)
bv = find_bm(EOS,yi,Tc,Pc)
bl = find_bm(EOS,xi,Tc,Pc)
#Z and fugacity determination for the vapor phase based on minimising Gibbs Free Energy
Zv = Z_factor(EOS,P,T,av,bv) #containing the max and min roots
fugV_v = get_fug(EOS, yi, Zv[0], Tc, Pc, P, T, omega, av, bv)
fugV_l = get_fug(EOS, yi, Zv[1], Tc, Pc, P, T, omega, av, bv)
deltaGV = np.sum(yi * np.log(fugV_l / fugV_v))
if deltaGV <= 0:
Zv = Zv[1]
fug_v = fugV_l
else:
Zv = Zv[0]
fug_v = fugV_v
#Z and fugacity determination for the liquid phase based on minimising Gibbs Free Energy
Zl = Z_factor(EOS,P,T,al,bl) #containing the max and min roots
fugL_v = get_fug(EOS, xi, Zl[0], Tc, Pc, P, T, omega, al, bl)
fugL_l = get_fug(EOS, xi, Zl[1], Tc, Pc, P, T, omega, al, bl)
deltaGL = np.sum(xi * np.log(fugL_l / fugL_v))
if deltaGL <= 0:
Zl = Zl[1]
fug_l = fugL_l
else:
Zl = Zl[0]
fug_l = fugL_v
Vv = np.divide(Zv*R*T, P)
Vl = np.divide(Zl*R*T, P)
return (fug_v, fug_l, l, xi, yi)
def volumeCorrection(EOS, V, zi, Pc, Tc):
Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])
if EOS == "PR":
#Si from the reader page 129
S = [-0.1540, 0.1002, -0.08501, -0.07935, -0.06413, -0.04350, -0.04183, -0.01478]
c = [3.7, 0] #CO2 and N2
#For the heavy components
for i in range(10, len(Pc)):
S.append(1 - 2.258/Mw[i]**0.1823) #values correlated for heavier components (+C7)
for i in range(0, len(Pc)-2):
c.append(S[i] * calc_b(EOS, Tc[i+2], Pc[i+2]))
V = V - np.sum([zi[i] * c[i] for i in range(len(Pc))])
return V
def volume(EOS, P, T, Pc, Tc, omega, zi = np.array([1]), mixture = False):
R = 8.314
if not mixture:
a = calc_a(EOS, T, Tc, Pc, omega)
b = calc_b(EOS, Tc, Pc)
Z = Z_factor(EOS,P,T,a,b)
fug_v = get_fug(EOS, zi, Z[0], Tc, Pc, P, T, omega, a, b)
fug_l = get_fug(EOS, zi, Z[1], Tc, Pc, P, T, omega, a, b)
deltaG = np.sum(zi * np.log(fug_l / fug_v))
if deltaG <= 0:
Z = Z[1]
fug = fug_l
else:
Z = Z[0]
fug = fug_v
V = np.divide(Z*R*T, P)
else:
bm = find_bm(EOS, zi, Tc, Pc)
am = find_am(EOS, zi, T, Tc, Pc, omega)
Z = Z_factor(EOS,P,T,am,bm)
fug_v = get_fug(EOS, zi, Z[0], Tc, Pc, P, T, omega, am, bm)
fug_l = get_fug(EOS, zi, Z[1], Tc, Pc, P, T, omega, am, bm)
deltaG = np.sum(zi * np.log(fug_v / fug_l))
if deltaG <= 0:
Z = Z[1]
fug = fug_l
else:
Z = Z[0]
fug = fug_v
V = np.divide(Z*R*T, P)
#V = volumeCorrection(EOS, V, zi, Pc, Tc)
return V
# Computes reference viscosity of methane using the correlation of Hanley et al. Cyrogenics, July 1975
# To be used for corresponding states computation of mixture viscosity
# A. R. Kovscek
# 20 November 2018
# Tref is the reference temperature in K (viscosity computed at this temperature)
# rho_ref is the reference density in g/cm3 (viscosity computed at this temperature and density)
# mu_C1 is the viscosity from correlation in mPa-s (identical to cP)
def ViscMethane(Tref,rho_ref):
import math
#Local variables
#critical density of methane (g/cm^3)
rho_c=16.043/99.2
#parameters for the dilute gas coefficient
GV=[-209097.5,264726.9,-147281.8,47167.40,-9491.872,1219.979,-96.27993,4.274152,-0.08141531]
#parameters for the first density correction term
Avisc1 = 1.696985927
Bvisc1 = -0.133372346
Cvisc1 = 1.4
Fvisc1 = 168.0
#parameters for the viscosity remainder
j1 = -10.35060586
j2 = 17.571599671
j3 = -3019.3918656
j4 = 188.73011594
j5 = 0.042903609488
j6 = 145.29023444
j7 = 6127.6818706
#compute dilute gas coefficient
visc0 = 0.
exp1 = 0.
for i in range(0,len(GV)):
exp1 = -1. + (i)*1./3.
visc0 = visc0 + GV[i]*math.pow(Tref,exp1)
#first density coefficient
visc1 = Avisc1+Bvisc1*math.pow((Cvisc1-math.log(Tref/Fvisc1)),2.)
#viscosity remainder
theta=(rho_ref-rho_c)/rho_c
visc2 = math.pow(rho_ref,0.1)
visc2 = visc2*(j2+j3/math.pow(Tref,1.5))+theta*math.sqrt(rho_ref)*(j5+j6/Tref+j7/math.pow(Tref,2.))
visc2 = math.exp(visc2)
visc2 = visc2 - 1.
visc2 = math.exp(j1+j4/Tref)*visc2
#methane viscosity at T and density (Tref,rho_ref)
#multiply by 10-4 to convert to mPa-s(cP)
mu_C1 = (visc0+visc1+visc2)*0.0001
return (mu_C1)
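# --- Hedged editorial sketch (not part of the original notebook) ---
# Quick sanity call of the methane reference-viscosity correlation above.
# The reference temperature (K) and density (g/cm^3) used here are arbitrary
# illustrative values, not data from the assignment.
mu_ref_example = ViscMethane(150.0, 0.30)
print('methane viscosity at 150 K, 0.30 g/cm3: %.4f cP' % mu_ref_example)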
def get_interp_density(p, T):
'''
@param p: pressure in Pa
@param T: temperature in K
@return : methane density in kg/m3
'''
data_p = [0.1e6, 1e6, 3e6, 5e6, 10e6, 20e6, 50e6]
data_T = [90.7, 94, 98, 100, 105, 110, 120, 140, 170]
if p < data_p[0] or p > data_p[-1] or T < data_T[0] or T > data_T[-1]:
raise Exception('Input parameter out of range')
data_den = [[451.5, 447.11, 441.68, 438.94, 431.95, 424.79, 409.9, 1.403, 1.1467],
[451.79, 447.73, 442.34, 439.62, 432.7, 425.61, 410.9, 377.7, 14.247],
[453, 449.08, 443.78, 441.11, 434.32, 427.38, 413.05, 381.12, 314.99],
[454, 450.4, 445.19, 442.55, 435.89, 429.09, 415.1, 384.28, 324.32],
[456, 453.57, 448.55, 446.02, 439.63, 433.13, 419.9, 391.35, 340.6],
[460, 458, 454.74, 452.37, 446.43, 440.43, 428.32, 402.99, 361.57],
[477, 473, 470, 468, 463.2, 458.14, 448.08, 427.88, 397.48]]
f = interpolate.interp2d(data_T, data_p, data_den)
return f(T, p)
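# --- Hedged editorial sketch (not part of the original notebook) ---
# Example lookup with the interpolation table above; 5 MPa and 110 K are
# inside the tabulated pressure/temperature ranges, so no exception is raised.
rho_example = get_interp_density(5e6, 110.0)
print('interpolated methane density at 5 MPa, 110 K: %s kg/m3' % rho_example)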
# In[39]:
def LBC_viscosity(P, T, zi, Tc, Pc, omega, Mw, Vci):
coef = [0.10230, 0.023364, 0.058533, -0.040758, 0.0093324]
Nc = len(zi)
EOS = 'PR'
Pmax = 3000 * 6894.76
Pressure = []
visc = []
while P < Pmax:
#flash
fug_v, fug_l, l, xi, yi = flash(EOS, 0.5, Nc, zi, Tc, Pc, P, T, omega)
if l>1:
xi = zi
#Computing Ksi
Ksi = 5.4402 * 399.54 * np.sum(xi * Tc)**(1/6)/np.multiply(np.sum(xi * Mw)**(0.5),np.sum(xi * Pc)**(2/3))
Ksi_i = 5.4402 * 399.54 * Tc**(1/6) * Mw**(-0.5) * Pc**(-2/3)
#Ksi_i = Tc**(1/6) * Mw**(-0.5) * Pc**(-2/3)
eta_star_i = np.zeros(xi.shape)
for i in range(Nc):
Tr = T/Tc[i]
if Tr < 1.5:
eta_star_i[i] = 34e-5 * (Tr**0.94)/Ksi_i[i]
else:
eta_star_i[i] = 17.78 * 1e-5 * ((4.58*Tr - 1.67)**0.625)/ Ksi_i[i]
eta_star = np.divide(np.sum(xi * eta_star_i * Mw**0.5), np.sum(xi * Mw**0.5))
MC7_plus = np.sum(xi[i] * Mw[i] for i in range(10, Nc)) / np.sum(xi[i] for i in range(10, Nc))
denC7_plus = 0.895
Vc_plus = (21.573 + 0.015122*MC7_plus - 27.656*denC7_plus + 0.070615*denC7_plus*MC7_plus) * 6.2372*1e-5
V_mixture = volume(EOS, P, T, Pc, Tc, omega, xi, True)
xC7_plus = np.sum(xi[i] for i in range(10, Nc))
Vc_mixture = np.sum(xi[i] * Vci[i] for i in range(10))*1e-6 + xC7_plus * Vc_plus
rho_r = Vc_mixture/V_mixture
viscosity = ((coef[0] + coef[1] * rho_r + coef[2] * rho_r**2 + coef[3] * rho_r**3 + coef[4] * rho_r**4)**4 - 0.0001)/Ksi + eta_star
visc.append(viscosity)
Pressure.append(P)
P = 1.1 * P
plt.plot(Pressure, visc)
plt.xlabel("Pressure (Pa)")
plt.ylabel("Viscosity (cP)")
plt.title("Viscosity vs pressure")
plt.show()
# In[40]:
P = 1500 * 6894.76
T = 106 + 273.15
Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'iC4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
zi = np.array([0.0044, 0.0017, 0.3463, 0.0263, 0.0335, 0.092, 0.0175, 0.0089, 0.0101, 0.0152, 0.05, 0.0602, 0.0399, 0.0355, 0.1153, 0.0764, 0.0633, 0.0533, 0.0330])#oil composition
Tc = np.array([304.2, 126.2, 190.6, 305.4, 369.8, 408.1, 425.2, 460.4, 469.6, 507.4, 548, 575, 603, 626, 633.1803, 675.9365, 721.3435, 785.0532, 923.8101]) # in Kelvin
Pc = np.array([72.9, 33.6, 45.4, 48.2, 41.9, 36.0, 37.5, 33.4, 33.3, 29.3, 30.7, 28.4, 26, 23.9, 21.6722, 19.0339, 16.9562, 14.9613, 12.6979])*101325 # in Pa
omega = np.array([0.228, 0.04, 0.008, 0.098, 0.152, 0.176, 0.193, 0.227, 0.251, 0.296, 0.28, 0.312, 0.348, 0.385, 0.6254, 0.7964, 0.9805, 1.2222, 1.4000]) # accentric factors
Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])
Vci = np.array([91.9, 84, 99.2, 147, 200, 259, 255, 311, 311, 368]) # cm3/mol
LBC_viscosity(P, T, zi, Tc, Pc, omega, Mw, Vci)
# In[41]:
def corresponding_state_Visco(P, T ,zi, Tc, Pc, omega, Mw):
R = 8.314 # gas constant
Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'iC4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
Nc = len(zi)
EOS = 'PR'
tol = 1e-5
Pmax = 3000 * 6894.76
visc = []
Pressure = []
while P < Pmax:
fug_v, fug_l, l, xi, yi = flash(EOS, 0.5, Nc, zi, Tc, Pc, P, T, omega)
if l>1:
xi = zi
        # Initializing
Tc_mix = 0
Mmix = 0
Mn = 0
M = 0
denominator = 0
for i in range(Nc):
Mn += xi[i] * Mw[i]
M += xi[i] * Mw[i]**2
for j in range(Nc):
Tc_mix += xi[i]*xi[j]*(Tc[i] * Tc[j])**(0.5)*((Tc[i]/Pc[i])**(1/3) + (Tc[j]/Pc[j])**(1/3))**3
denominator += xi[i]*xi[j]*((Tc[i]/Pc[i])**(1/3) + (Tc[j]/Pc[j])**(1/3))**3
Tc_mix = Tc_mix / denominator
Pc_mix = 8 * Tc_mix / denominator
M /= Mn
Mmix = 1.304 * 1e-4 * (M**2.303 - Mn**2.303) + Mn
Tr = (T * Tc[2])/Tc_mix
Pr = (P * Pc[2])/Pc_mix
rho_c = 162.84 #kg/m3
#volume correction
S = -0.154
b = calc_b(EOS, Tc[2], Pc[2])
Vc = volume(EOS, Pr, Tr, np.array([Pc[2]]), np.array([Tc[2]]), np.array([omega[2]]))
volume_cor = Vc - b * S
rho_r = Mw[2] * 1e-3 / volume_cor / rho_c
alpha_mix = 1 + 7.378 * 10**(-3) * rho_r ** 1.847 * Mmix**0.5173
alpha_0 = 1 + 0.031*rho_r**1.847
Tref = Tr * alpha_0 / alpha_mix
Pref = Pr * alpha_0 / alpha_mix
S = -0.085
Vc_ref = volume(EOS, Pref, Tref, np.array([Pc[2]]), np.array([Tc[2]]), np.array([omega[2]]))
volume_cor = Vc_ref - b * S
rho_ref = Mw[2]/volume_cor/ 1e6
visc_methane = ViscMethane(Tref, rho_ref)
visc_mix = (Tc_mix/Tc[2])**(-1/6) * (Pc_mix/Pc[2])**(2/3) * (Mmix/Mw[2])**(1/2) * alpha_mix / alpha_0 * visc_methane
visc.append(visc_mix)
Pressure.append(P)
#print(P, visc_mix)
P = 1.1 * P
plt.plot(Pressure, visc)
plt.xlabel("Pressure (Pa)")
plt.ylabel("Viscosity (cP)")
plt.title("Viscosity vs compositions")
plt.show()
# In[42]:
zi = np.array([0.0044, 0.0017, 0.3463, 0.0263, 0.0335, 0.092, 0.0175, 0.0089, 0.0101, 0.0152, 0.05, 0.0602, 0.0399, 0.0355, 0.1153, 0.0764, 0.0633, 0.0533, 0.0330])#oil composition
Tc = np.array([304.2, 126.2, 190.6, 305.4, 369.8, 408.1, 425.2, 460.4, 469.6, 507.4, 548, 575, 603, 626, 633.1803, 675.9365, 721.3435, 785.0532, 923.8101]) # in Kelvin
Pc = np.array([72.9, 33.6, 45.4, 48.2, 41.9, 36.0, 37.5, 33.4, 33.3, 29.3, 30.7, 28.4, 26, 23.9, 21.6722, 19.0339, 16.9562, 14.9613, 12.6979])*101325 # in Pa
omega = np.array([0.228, 0.04, 0.008, 0.098, 0.152, 0.176, 0.193, 0.227, 0.251, 0.296, 0.28, 0.312, 0.348, 0.385, 0.6254, 0.7964, 0.9805, 1.2222, 1.4000]) # accentric factors
Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])
P = 1500 * 6894.76
T = 106 + 273.15
corresponding_state_Visco(P, T, zi, Tc, Pc, omega, Mw)
# In[43]:
def viscosity(Oilcomp, Injcomp, P, T, Pc, Tc, omega, Mw, Vci):
coef = [0.10230, 0.023364, 0.058533, -0.040758, 0.0093324]
EOS = 'PR'
Nc = len(Oilcomp)
alpha = 0.5
l = 0.5
tol = 1e-5
zi = Oilcomp + alpha * (Injcomp - Oilcomp)
fug_v, fug_l, l, xi, yi = flash(EOS, 0.5, Nc, zi, Tc, Pc, P, T, omega)
Ksi = 5.4402 * 399.54 * np.sum(xi * Tc)**(1/6)/np.multiply(np.sum(xi * Mw)**(0.5),np.sum(xi * Pc)**(2/3))
Ksi_i = 5.4402 * 399.54 * Tc**(1/6) * Mw**(-0.5) * Pc**(-2/3)
eta_star_i = np.zeros(xi.shape)
for i in range(Nc):
Tr = T/Tc[i]
if Tr < 1.5:
eta_star_i[i] = 34e-5 * (Tr**0.94)/Ksi_i[i]
else:
eta_star_i[i] = 17.78 * 1e-5 * ((4.58*Tr - 1.67)**0.625)/ Ksi_i[i]
eta_star = np.divide(np.sum(xi * eta_star_i * Mw**0.5), np.sum(xi * Mw**0.5))
MC7_plus = np.sum(xi[i] * Mw[i] for i in range(10, Nc)) / np.sum(xi[i] for i in range(10, Nc))
denC7_plus = 0.895
Vc_plus = (21.573 + 0.015122*MC7_plus - 27.656*denC7_plus + 0.070615*denC7_plus*MC7_plus) * 6.2372*1e-5
V_mixture = volume(EOS, P, T, Pc, Tc, omega, xi, True)
xC7_plus = np.sum(xi[i] for i in range(10, Nc))
Vc_mixture = np.sum(xi[i] * Vci[i] for i in range(10))*1e-6 + xC7_plus * Vc_plus
rho_r = Vc_mixture/V_mixture
viscosity = ((coef[0] + coef[1] * rho_r + coef[2] * rho_r**2 + coef[3] * rho_r**3 + coef[4] * rho_r**4)**4 - 0.0001)/Ksi + eta_star
return viscosity
# In[44]:
def vicosity_vs_composition(LPG_CO2_comb, Oilcomp, P, T, Pc, Tc, omega, Mw, Vci, makePlot = False):
    '''This function computes the viscosity for different LPG/CO2 injectant compositions
    and returns a plot of viscosity versus gas injectant composition.
    LPG_CO2_comb contains the percentage of LPG in each mixture:
    an array of the form [0.7, 0.55, 0.4, 0.2, 0.1, 0.] means that the first mixture
    is 70% LPG and 30% CO2, the second 55% LPG and 45% CO2, and so on.
    The LPG composition is: C2: 0.01, C3: 0.38, iC4: 0.19, nC4: 0.42.
    '''
#reservoir Oil components.
Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'i-C4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
LPG = np.array([0, 0, 0, 0.01, 0.38, 0.19, 0.42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
CO2 = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
numMixtures = len(LPG_CO2_comb)
Viscosity = []
composition = []
for i in range(numMixtures):
Injcomp = np.array(LPG_CO2_comb[i] * LPG + (1 - LPG_CO2_comb[i]) * CO2)
Viscosity.append(viscosity(Oilcomp, Injcomp, P, T, Pc, Tc, omega, Mw, Vci))
composition.append(LPG_CO2_comb[i])
if makePlot:
plt.plot(composition, Viscosity)
plt.xlabel('Composition (mole fraction of the LPG)')
plt.ylabel('Viscosity (cP)')
        plt.title('Viscosity vs Injectant composition')
plt.show()
# In[45]:
P = 28e6
T = 106 + 273.15
Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'iC4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
zi = np.array([0.0044, 0.0017, 0.3463, 0.0263, 0.0335, 0.092, 0.0175, 0.0089, 0.0101, 0.0152, 0.05, 0.0602, 0.0399, 0.0355, 0.1153, 0.0764, 0.0633, 0.0533, 0.0330])#oil composition
Tc = np.array([304.2, 126.2, 190.6, 305.4, 369.8, 408.1, 425.2, 460.4, 469.6, 507.4, 548, 575, 603, 626, 633.1803, 675.9365, 721.3435, 785.0532, 923.8101]) # in Kelvin
Pc = np.array([72.9, 33.6, 45.4, 48.2, 41.9, 36.0, 37.5, 33.4, 33.3, 29.3, 30.7, 28.4, 26, 23.9, 21.6722, 19.0339, 16.9562, 14.9613, 12.6979])*101325 # in Pa
omega = np.array([0.228, 0.04, 0.008, 0.098, 0.152, 0.176, 0.193, 0.227, 0.251, 0.296, 0.28, 0.312, 0.348, 0.385, 0.6254, 0.7964, 0.9805, 1.2222, 1.4000]) # accentric factors
Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])
Vci = np.array([91.9, 84, 99.2, 147, 200, 259, 255, 311, 311, 368]) # cm3/mol
vicosity_vs_composition(np.array([0.7, 0.55, 0.4, 0.2, 0.1, 0.]), zi, P, T, Pc, Tc, omega, Mw, Vci, True)
# In[47]:
#Plotting the experimental viscosity versus pressure data from the CCE experiment.
x = np.array([
20096156.97
,19406680.97
,18717204.97
,16834935.49
,15476667.77
,15312158.8
,13890872.97
,10443492.97
])
y = np.array([
0.622
,0.6162
,0.611
,0.597
,0.5869
,0.5857
,0.6341
,0.708
])
plt.plot(x, y, 'r')
plt.xlabel("Pressure (Pa)")
plt.ylabel("Viscosity (cP)")
plt.title("Experimental evolution of viscosity")
plt.legend(loc='best')
plt.show()
# In[ ]:
| ["youssef.aitousarrah@gmail.com"] | youssef.aitousarrah@gmail.com |
1dfee621f2c8bf35b8a73f7fbbb1a64d238e125a | bbb21bb79c8c3efbad3dd34ac53fbd6f4590e697 | /week3/TODO/TODO/settings.py | 947cd11c197d9ed2bf30a09cd9c4016007788b22 | [] | no_license | Nusmailov/BFDjango | b14c70c42da9cfcb68eec6930519da1d0b1f53b6 | cab7f0da9b03e9094c21efffc7ab07e99e629b61 | refs/heads/master | 2020-03-28T21:11:50.706778 | 2019-01-21T07:19:19 | 2019-01-21T07:19:19 | 149,136,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py |
"""
Django settings for TODO project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mitb8&^*0ibt!u_xqe1!tjzumo65hy@cnxt-z#+9+p@m$u8qnn'
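# A hedged sketch (editorial addition, not part of the original project): in production the
# key would typically come from the environment, falling back to the development key above.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)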
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TODO.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TODO.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"nusmailov@gmail.com"
] |
nusmailov@gmail.com
|
398f138bde398c4dea87e5a99707f52b0581bd66
|
6cabff723ad404c3883037d9fa1d32298c27b23e
|
/练习/实战8.py
|
0275a7e71db5efc7a6797d06ef684f6bb633958c
|
[] |
no_license
|
Brandyzwz/practice
|
300c128947e59b209098c60131ed3750e982b28a
|
661f74851af6208b5c6880b41ba5d7bc9da6bf5c
|
refs/heads/master
| 2020-03-25T02:25:51.136558
| 2018-08-02T12:06:44
| 2018-08-02T12:06:44
| 143,289,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
a = int(input('Enter the length of leg a:\n'))
b = int(input('Enter the length of leg b:\n'))
c = (a**2+b**2)**0.5
print('The length of the hypotenuse c is: %.2f' % c)
|
[
"brandyzwz@outlook.com"
] |
brandyzwz@outlook.com
|
7b91e3b074f85271a746505ec2100144aaa01af3
|
d7641647d67d110e08997767e85bbea081c2537b
|
/bitmovin_api_sdk/models/filter.py
|
6836a5d837fc6e5d357ec0e2fa2c5884394e48d2
|
[
"MIT"
] |
permissive
|
aachenmax/bitmovin-api-sdk-python
|
d3ded77c459852cbea4927ff28c2a4ad39e6026a
|
931bcd8c4695a7eb224a7f4aa5a189ba2430e639
|
refs/heads/master
| 2022-11-16T08:59:06.830567
| 2020-07-06T07:16:51
| 2020-07-06T07:16:51
| 267,538,689
| 0
| 1
|
MIT
| 2020-07-06T07:16:52
| 2020-05-28T08:44:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
# coding: utf-8
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource
import pprint
class Filter(BitmovinResource):
discriminator_value_class_map = {
'CROP': 'CropFilter',
'CONFORM': 'ConformFilter',
'WATERMARK': 'WatermarkFilter',
'ENHANCED_WATERMARK': 'EnhancedWatermarkFilter',
'ROTATE': 'RotateFilter',
'DEINTERLACE': 'DeinterlaceFilter',
'AUDIO_MIX': 'AudioMixFilter',
'DENOISE_HQDN3D': 'DenoiseHqdn3dFilter',
'TEXT': 'TextFilter',
'UNSHARP': 'UnsharpFilter',
'SCALE': 'ScaleFilter',
'INTERLACE': 'InterlaceFilter',
'AUDIO_VOLUME': 'AudioVolumeFilter',
'EBU_R128_SINGLE_PASS': 'EbuR128SinglePassFilter'
}
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(Filter, self), "to_dict"):
result = super(Filter, self).to_dict()
for k, v in iteritems(self.discriminator_value_class_map):
if v == type(self).__name__:
result['type'] = k
break
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Filter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
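# A hedged illustration (editorial note, not part of the SDK): to_dict() above injects the
# discriminator key by matching the concrete class name against discriminator_value_class_map,
# so an instance of a subclass named 'CropFilter' serializes with result['type'] == 'CROP'.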
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
931cf513935db910bfd70b2fad4b1ab03410eaa3
|
6fc9e67094d60cb192dcd4e3370e41aae00e73b2
|
/rotate.py
|
cba2a8d2d2d33fa924b5fc5ad00ad3908e89c346
|
[] |
no_license
|
Nusha97/IRIS-Project
|
4c287ac87c482d1dea220a79e9fad5546a954bf5
|
670f66afb7ace4d8e65e2071d28f479b8573972f
|
refs/heads/master
| 2021-08-07T10:58:02.602248
| 2020-04-21T10:59:43
| 2020-04-21T10:59:43
| 158,009,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
# -*- coding: utf-8 -*-
"""
Automatically detect rotation and line spacing of an image of text using
Radon transform
If image is rotated by the inverse of the output, the lines will be
horizontal (though they may be upside-down depending on the original image)
It doesn't work with black borders
"""
from __future__ import division, print_function
from skimage.transform import radon
from PIL import Image
from numpy import asarray, mean, array, blackman
import numpy
from numpy.fft import rfft
import matplotlib.pyplot as plt
from matplotlib.mlab import rms_flat
try:
# More accurate peak finding from
# https://gist.github.com/endolith/255291#file-parabolic-py
from parabolic import parabolic
def argmax(x):
return parabolic(x, numpy.argmax(x))[0]
except ImportError:
from numpy import argmax
filename = '2Drotate.png'
# Load file, converting to grayscale
I = asarray(Image.open(filename).convert('L'))
I = I - mean(I) # Demean; make the brightness extend above and below zero
# Do the radon transform and display the result
sinogram = radon(I)
plt.gray()
# Find the RMS value of each row and find "busiest" rotation,
# where the transform is lined up perfectly with the alternating dark
# text and white lines
r = array([rms_flat(line) for line in sinogram.transpose()])
rotation = argmax(r)
print('Rotation: {:.2f} degrees'.format(90 - rotation))
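# A hedged sketch of the line-spacing estimate mentioned in the module docstring (this part is
# an editorial assumption based on the unused rfft/blackman imports, not code from this repo):
# window the projection profile at the detected rotation and read off the dominant spatial
# frequency of the dark/light line pattern.
row = sinogram[:, int(rotation)]
windowed = row * blackman(len(row))   # taper the profile to reduce spectral leakage
spectrum = abs(rfft(windowed))
spectrum[0] = 0                       # ignore the DC component
frequency = argmax(spectrum)          # dominant frequency, in cycles per profile length
if frequency > 0:
    print('Line spacing: {:.2f} pixels'.format(len(row) / frequency))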
import argparse
import cv2
import numpy as np
img = cv2.imread('2Drotate.png', 0)
rows,cols = img.shape
M = cv2.getRotationMatrix2D((cols/2,rows/2),90 - rotation,1)
dst = cv2.warpAffine(img,M,(cols,rows))
plt.plot(121),plt.imshow(dst),plt.title('Output')
plt.savefig('hello.png')
plt.show()
|
[
"noreply@github.com"
] |
Nusha97.noreply@github.com
|
1f0f45aa77603540df78c0dde6159ce16e10364a
|
873b6d338e696b200d1a6ca74bef85deaa8d8088
|
/manage.py
|
b4f28f3c7627588f205a3b43b083e42d7990486f
|
[] |
no_license
|
Craiglal/ChudoSkrynia
|
9720c8360f1589b97c15c5cfa48ba15bf2c6d0e7
|
ef2ca9ca356666628f2c8e4d1df8e97e0d0f72eb
|
refs/heads/master
| 2020-08-15T09:23:57.076445
| 2019-10-15T14:28:26
| 2019-10-15T14:28:26
| 215,316,335
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChudoSkrynia.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"asleep.alex@gmail.com"
] |
asleep.alex@gmail.com
|
d82cf9e821ecf30bd91d020d422728952809a303
|
597ed154876611a3d65ca346574f4696259d6e27
|
/dbaas/workflow/steps/tests/test_vm_step.py
|
1f05feed7c79364c570f0ed132f5da3578825a91
|
[] |
permissive
|
soitun/database-as-a-service
|
41984d6d2177734b57d726cd3cca7cf0d8c5f5d6
|
1282a46a9437ba6d47c467f315b5b6a3ac0af4fa
|
refs/heads/master
| 2023-06-24T17:04:49.523596
| 2018-03-15T19:35:10
| 2018-03-15T19:35:10
| 128,066,738
| 0
| 0
|
BSD-3-Clause
| 2022-05-10T22:39:58
| 2018-04-04T13:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
from mock import patch
from physical.tests.factory import HostFactory, EnvironmentFactory
from ..util.vm import VmStep, MigrationWaitingBeReady
from . import TestBaseStep
@patch('workflow.steps.util.vm.get_credentials_for', return_value=True)
@patch('workflow.steps.util.vm.CloudStackProvider', return_value=object)
class VMStepTests(TestBaseStep):
def setUp(self):
super(VMStepTests, self).setUp()
self.host = self.instance.hostname
def test_environment(self, *args, **kwargs):
vm_step = VmStep(self.instance)
self.assertEqual(vm_step.environment, self.environment)
def test_host(self, *args, **kwargs):
vm_step = VmStep(self.instance)
self.assertEqual(vm_step.host, self.host)
@patch('workflow.steps.util.vm.get_credentials_for', return_value=True)
@patch('workflow.steps.util.vm.CloudStackProvider', return_value=object)
class VMStepTestsMigration(TestBaseStep):
def setUp(self):
super(VMStepTestsMigration, self).setUp()
self.host = self.instance.hostname
self.future_host = HostFactory()
self.host.future_host = self.future_host
self.host.save()
self.environment_migrate = EnvironmentFactory()
self.environment.migrate_environment = self.environment_migrate
self.environment.save()
def test_environment(self, *args, **kwargs):
vm_step = MigrationWaitingBeReady(self.instance)
self.assertEqual(vm_step.environment, self.environment_migrate)
def test_host(self, *args, **kwargs):
vm_step = MigrationWaitingBeReady(self.instance)
self.assertEqual(vm_step.host, self.future_host)
|
[
"mauro_murari@hotmail.com"
] |
mauro_murari@hotmail.com
|
4f9b6137e32d293b24b9b687905de81bb99a0239
|
e28a89a1d79ab7b8ebf579a451b922b357206bb0
|
/leecode/1190. 反转每对括号间的子串.py
|
1d711d65956622209748a7ea68aac81cccb27e98
|
[] |
no_license
|
bobobyu/leecode-
|
67dfcf9f9891a39d68e9c610e896c7151809d529
|
015d42bb58c19869658d3f6405435134ac5444df
|
refs/heads/master
| 2023-01-30T23:35:00.882463
| 2020-12-18T02:20:28
| 2020-12-18T02:20:28
| 308,018,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
from typing import *
from collections import *
class Solution:
def reverseParentheses(self, s: str) -> str:
string_stack: Deque = deque()
s = list(s)
while s:
if (op := s.pop(0)) == '(':
string_stack.append('(')
elif op == ')':
temp: str = ''
while string_stack[-1] != '(':
temp = string_stack.pop() + temp
string_stack[-1] = temp[::-1]
else:
string_stack.append(op)
return ''.join(string_stack)
s = Solution()
print(s.reverseParentheses("a(bcdefghijkl(mno)p)q"))
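# A hedged alternative sketch (not part of the original submission): the O(n) "wormhole"
# traversal, which precomputes matching bracket positions and then walks the string,
# jumping to the matching bracket and flipping direction whenever a parenthesis is met.
def reverse_parentheses_linear(s: str) -> str:
    n = len(s)
    pair = [0] * n
    stack = []
    for i, ch in enumerate(s):
        if ch == '(':
            stack.append(i)
        elif ch == ')':
            j = stack.pop()
            pair[i], pair[j] = j, i
    out = []
    i, step = 0, 1
    while i < n:
        if s[i] in '()':
            i = pair[i]      # jump to the matching bracket
            step = -step     # and reverse the walking direction
        else:
            out.append(s[i])
        i += step
    return ''.join(out)
print(reverse_parentheses_linear("a(bcdefghijkl(mno)p)q"))  # same result as the class above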
|
[
"676158322@qq.com"
] |
676158322@qq.com
|
b61bba4f1a3cafc372508b61c5bc9307207181e7
|
7cc4b082d0af7622cd77204a1eef2311c24445de
|
/my-venv/bin/wheel
|
e1988530207fb5198672a49ecda9425ec3401572
|
[] |
no_license
|
aasthakumar/ChatClient
|
cbfc18ba4cec6c20280680c659214a458cef5688
|
32c959488eda73caa81c8643957dbf1e6f79c77a
|
refs/heads/master
| 2020-03-15T01:50:46.755513
| 2018-05-02T20:38:43
| 2018-05-02T20:38:43
| 131,903,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
#!/Users/aastha/Documents/GitHub/CMPE-273-quizzes/lab3/my-venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"aastha.kumar@sjsu.edu"
] |
aastha.kumar@sjsu.edu
|
|
16eb3d8ca61b71e4472dd9dbccbad3c0497e2eb6
|
9f59572095262bb77b1069154dd70f52a2743582
|
/utils/.history/helper_20210303152111.py
|
db03f12b94ec4773cd7b73e126c98158e374fa06
|
[] |
no_license
|
zhang199609/diagnose_fault_by_vibration
|
ef089807fd3ae6e0fab71a50863c78ea163ad689
|
7b32426f3debbe9f98a59fe78acdec3ad6a186fd
|
refs/heads/master
| 2023-04-15T15:35:59.000854
| 2021-04-23T06:53:40
| 2021-04-23T06:53:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
"""
提高代码简洁性和可交互性的代码
"""
import time
def get_running_time(fun):
"""
代码运行时间装饰器
参数
-----
fun:要测试运行时间的函数
返回
-----
返回装饰器wrapper
例子
-----
>>> @get_running_time
def hello(name):
print("hello %s"%name)
time.sleep(3)
>>>
hello("Tony")
"""
def wrapper(*args, **kwargs):
start_time = time.time()
        # Call the function whose running time is being measured
fun(*args, **kwargs)
end_time = time.time()
running_time = end_time - start_time
h = int(running_time//3600)
m = int((running_time - h*3600)//60)
s = int(running_time%60)
print("time cost: {0}:{1}:{2}".format(h, m, s))
        return running_time  # -> optional; note the wrapped function's own return value is discarded
return wrapper
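# A minimal usage sketch (assumption: the module is run as a script; not part of the original file).
if __name__ == "__main__":
    @get_running_time
    def slow_add(a, b):
        time.sleep(1)
        return a + b
    slow_add(1, 2)  # prints something like "time cost: 0:0:1"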
|
[
"18036834556@163.com"
] |
18036834556@163.com
|
8bf31f37886bfff9512c59b2d24b2699e2383f4b
|
559d7428cba525ddff43a4b03f495c070f382075
|
/Final/FinalExam/Q1/lazy.py
|
0ecd5c3d8b15acde96bbd4735217dc6431ee8534
|
[] |
no_license
|
kwokwill/comp3522-object-oriented-programming
|
9c222ad4d1a2c2420a5eb509f80ba792e94991f6
|
6e2347b70c07cfc3ca83af29c2bd5c4696c55bb6
|
refs/heads/master
| 2023-04-13T19:16:25.546865
| 2021-04-27T21:20:48
| 2021-04-27T21:20:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
import time
"""
(4 marks total)
This program simulates loading screens in a game. One of the screens is entered and resources
need to be loaded, the other does not
The code below takes about 1 second (on my machine) to complete.
Requirements
Speed up the code using the LAZY INITIALIZATION design pattern.
Do NOT change any code in the main function
Hints:
The code should run in about half the time after implementing Lazy Initialization
There is no need to use any multithreading/multiprocessing
"""
class Resources:
def __init__(self):
print("Creating resources")
time.sleep(0.5)
def __str__(self):
return "resources available"
class Screen:
def __init__(self, name):
self._name = name
self._resources = None
def enter_screen(self):
if not self._resources:
self._resources = Resources()
return self._resources
def __str__(self):
return self._name
def main():
start_time = time.time()
game_over = Screen("Game over")
print(game_over)
main_menu = Screen("Main menu")
print(main_menu)
print(main_menu.enter_screen())
end_time = time.time()
print("duration:", end_time - start_time)
if __name__ == '__main__':
main()
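# A hedged alternative sketch (not part of the exam solution): the same lazy-initialization
# idea expressed as a read-only property, so callers never instantiate Resources directly.
class LazyScreen:
    def __init__(self, name):
        self._name = name
        self._resources = None
    @property
    def resources(self):
        if self._resources is None:
            self._resources = Resources()  # created only on first access
        return self._resources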
|
[
"donglmin@icloud.com"
] |
donglmin@icloud.com
|
482c33547adb2af1e66c780d7a9cc833b6182f11
|
c5da0ec1004fcb4283c62f3b2700a5b78dfa1fda
|
/Code/neuro.py
|
02386f5800c9c181a7ba7d26f5b8926616d5cca2
|
[] |
no_license
|
akshayadiga/Optical-Character-Recognition-of-English-Alphabets-using-Neural-Networks
|
78cd63c9d5d5f38bb0fd6de6e003a307712920fe
|
8c040655a72c0fd4dacfa7b358def99d339755d9
|
refs/heads/master
| 2021-01-25T13:07:13.730875
| 2018-03-02T04:58:14
| 2018-03-02T04:58:14
| 123,532,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
import neurolab as nl
def toProb(letter):
l=[]
int_letter=ord(letter);
pos=int_letter-97
for i in range(26):
if(i==pos):
l.append(1)
else:
l.append(0)
return l
def main():
# X = matrix of m input examples and n features
f=open("letter.data","r")
X=[]
Y=[]
count=0
for line in f:
vector=line.strip().split()
in_vec=vector[6:]
out_vec=vector[1]
in_vec=[int(i) for i in in_vec]
#out_vec=[int(i) for i in out_vec]
X.append(in_vec)
Y.append(out_vec)
count=count+1
if(count==800):
break
#X=numpy.matrix(X)
f.close()
# Y = matrix of m output vectors, where each output vector has k output units
#Y=numpy.matrix(Y)
#print X
#print Y
Y=[toProb(i) for i in Y]
net = nl.net.newff([[0, 1]]*128, [20, 26],transf=[nl.trans.TanSig(),nl.trans.SoftMax()])
net.train(X, Y, epochs=20, show=1, goal=0.02)
#z=net.sim([X[1]])
#print z
f=open("letter.data","r")
X=[]
Y=[]
count=0
for line in f:
if(count<800):
count=count+1
continue
vector=line.strip().split()
in_vec=vector[6:]
out_vec=vector[1]
in_vec=[int(i) for i in in_vec]
#out_vec=[int(i) for i in out_vec]
X.append(in_vec)
Y.append(out_vec)
count=count+1
if(count==1000):
break
z=net.sim(X)
    bit_let_pair = list(zip(X, Y))  # materialize: zip() is an iterator in Python 3 and is indexed below
b=[i for p,i in bit_let_pair]
correct=0
incorrect=0
let_predict=[]
###change each index to appropriate letter#####
for i in z:
probs =i
prob_letter=max(probs)
for j in range(26):
if(probs[j]==prob_letter):
prob_pos=j
prob_pos+=97
let_predict.append(chr(prob_pos))
#print(let_predict)
#print(b)
################################
for i in range(len(let_predict)):
if(let_predict[i]==bit_let_pair[i][1]):
correct+=1
else:
incorrect+=1
efficiency=correct/(float(correct+incorrect))
    print(efficiency * 100, "%")  # classification accuracy in percent
#e = net.train(input, output, show=1, epochs=100, goal=0.0001)
main()
|
[
"akshayadiga@Akshays-MacBook-Pro.local"
] |
akshayadiga@Akshays-MacBook-Pro.local
|
b8572b08870ce01777c59a851f52f3cd3d40ed69
|
ed6dd94781e3022f230050284d2ddd3554cc0772
|
/multithreading/multiprocessing_pipes_conttest.py
|
379999612d2531d37797406abcedc405c450bf1c
|
[] |
no_license
|
Mantabit/python_examples
|
602d4f4237dbc2044d30dc5482e3e2dee4d90fb6
|
516dbb9cc63c7de5bfe7d0e79477dff9ff340a5d
|
refs/heads/master
| 2021-07-04T08:26:38.007606
| 2020-08-17T10:09:04
| 2020-08-17T10:09:04
| 153,170,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
import multiprocessing as mp
import time
class testClass(object):
def __init__(self,name):
self.name=name
def doSomething(self):
print("Object %s reporting!"%(self.name))
#function receives objects
def receiverFunction(receive_end):
while True:
#receive object from the pipe
try:
obj=receive_end.recv()
except EOFError as err:
print("nothing left in the queue, aborting receiver thread")
break
#use the received object
obj.doSomething()
#function generates objects
def producerFunction(send_end):
start=time.time()
i=0
    #produce data every 50ms for 1s
while time.time()-start<1:
i+=1
send_end.send(testClass("Object%d"%(i)))
time.sleep(50e-3)
print("Closing the send_end in producer process...")
send_end.close()
if __name__=="__main__":
(receive_end,send_end)=mp.Pipe()
p_recv=mp.Process(target=receiverFunction,args=[receive_end])
p_send=mp.Process(target=producerFunction,args=[send_end])
p_recv.start()
p_send.start()
p_send.join()
send_end.close()
print("Closing send_end in parent process")
p_recv.join()
|
[
"dvarx@gmx.ch"
] |
dvarx@gmx.ch
|
3100c28087a8a7b53bc1fb5f666abd9059c4956a
|
4fec8ef57c150b088c09a4753e1e9fdb3a2ddabd
|
/yadjangoblog/yaaccounts/apps.py
|
700c03e127494b503dcbc9c698fcf5f424059f4d
|
[] |
no_license
|
ValDon/YaDjangoBlog
|
629c124eb6475a2b1947d4b224b6cdd9473a0490
|
4e6b6453c73470b8f05d062a460962ce118954c3
|
refs/heads/master
| 2020-04-10T22:53:10.000582
| 2019-05-04T15:06:35
| 2019-05-04T15:06:35
| 161,334,986
| 0
| 0
| null | 2018-12-11T13:05:01
| 2018-12-11T13:05:00
| null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from django.apps import AppConfig
class YaAccountsAppConfig(AppConfig):
name = 'yadjangoblog.yaaccounts'
    verbose_name = 'YaAccounts module'
|
[
"twocucao@gmail.com"
] |
twocucao@gmail.com
|
cee0f8705da747a739d9a3dcea926258152f7f22
|
e8ac7df7a1e067ce002ed12e295e6e1a0908cc8c
|
/Python/ThinkFlask/home/views/index.py
|
9606b3fed92dca1ef2bbb1509bf9f8f1c929cbeb
|
[] |
no_license
|
919858271/MyCode
|
fc9a9e3479843f933582f1030d504d6f5eb94dcb
|
5940b045207376c4c679260a660ca402b5b4751c
|
refs/heads/master
| 2021-06-21T13:59:00.754383
| 2019-07-21T13:28:48
| 2019-07-21T13:28:48
| 53,465,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
#-------------------------------------------------------------------------------
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: jianwen
# Email: npujianwenxu@163.com
#-------------------------------------------------------------------------------
from flask import render_template
from app.models import Model
from home.model.models import User
from home import home_router
@home_router.route('/')
def index():
return 'Think Flask. This is Home'
@home_router.route('/add/<username>/<password>/')
def add_user(username, password):
user = User(username=username, password=password)
User.insert(user)
return 'success'
@home_router.route('/delete/<int:key>/')
def delete_user(key):
user = User.query.get(key)
Model.delete(user)
return 'success'
@home_router.route('/test/<username>/')
def test(username):
return render_template('home/index.html', username=username)
|
[
"npujianwenxu@163.com"
] |
npujianwenxu@163.com
|
63b17a08ac2f4745e14601141a43ae06dd3014d8
|
e4dfc1402839f277e1e9ff8686dc6b67f1eb0bf0
|
/api_example/languages/urls.py
|
289d6f6c2218819b00280ffb8242142ac2eacf15
|
[] |
no_license
|
punitchauhan771/Sample-Rest-API
|
32ee7a16270a978ac4d82a161fe4fb677c029d36
|
8372982507fa2ee301d3a9de1e6fe9d4b66028ed
|
refs/heads/main
| 2023-02-24T00:49:57.831224
| 2021-01-23T08:23:35
| 2021-01-23T08:23:35
| 331,853,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('languages', views.LanguageView)
router.register('Paradigm', views.ParadigmView)
router.register('Programmer',views.ProgrammerView)
urlpatterns = [
path('', include(router.urls))
]
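# Editorial note (behaviour of DRF's DefaultRouter, not extra code from this repo): the
# registrations above expose list/detail routes such as /languages/ and /languages/<pk>/,
# and the URL prefixes follow the registered names verbatim, hence the mixed casing of
# /Paradigm/ and /Programmer/.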
|
[
"chauhanbhupendra980@gmail.com"
] |
chauhanbhupendra980@gmail.com
|
6df0a8249cc984e79381ba0ffcddd3d27403a62b
|
0c72282d601ccf840dd4e41b675c0675de7bc916
|
/students/Jean-Baptiste/lessons/lesson03/assignment03_solution_JB/create_customers.py
|
45516c4640f57281eda038e5e82484c20727fe20
|
[] |
no_license
|
zconn/PythonCert220Assign
|
c7fedd9ffae4f9e74e5e4dfc59bc6c511c7900ab
|
99271cd60485bd2e54f8d133c9057a2ccd6c91c2
|
refs/heads/master
| 2020-04-15T14:42:08.765699
| 2019-03-14T09:13:36
| 2019-03-14T09:13:36
| 164,763,504
| 2
| 0
| null | 2019-01-09T01:34:40
| 2019-01-09T01:34:40
| null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
"""
This is to create database using the Peewee ORM, sqlite and Python
"""
from customers_model import *
import customers_model as cm
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info('Let us build the classes from the model in the database')
cm.database.create_tables([cm.Customer])
cm.database.close()
|
[
"jbyamindi@yahoo.fr"
] |
jbyamindi@yahoo.fr
|
3129d119bb1773e4909ac9e1ecf759cef0cad06e
|
539789516d0d946e8086444bf4dc6f44d62758c7
|
/inference/python/inference.py
|
7fc210e7978a31556f20ba12d8a1baa22d2ff6c4
|
[] |
no_license
|
hoangcuong2011/etagger
|
ad05ca0c54f007f54f73d39dc539c3737d5acacf
|
611da685d72da207870ddb3dc403b530c859d603
|
refs/heads/master
| 2020-05-03T15:15:33.395186
| 2019-03-28T01:40:21
| 2019-03-28T01:40:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,683
|
py
|
from __future__ import print_function
import sys
import os
path = os.path.dirname(os.path.abspath(__file__)) + '/../..'
sys.path.append(path)
import time
import argparse
import tensorflow as tf
import numpy as np
# for LSTMBlockFusedCell(), https://github.com/tensorflow/tensorflow/issues/23369
tf.contrib.rnn
# for QRNN
try: import qrnn
except: sys.stderr.write('import qrnn, failed\n')
from embvec import EmbVec
from config import Config
from token_eval import TokenEval
from chunk_eval import ChunkEval
from input import Input
def load_frozen_graph(frozen_graph_filename, prefix='prefix'):
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
op_dict=None,
producer_op_list=None,
name=prefix,
)
return graph
def inference(config, frozen_pb_path):
"""Inference for bucket
"""
# load graph
graph = load_frozen_graph(frozen_pb_path)
for op in graph.get_operations():
sys.stderr.write(op.name + '\n')
# create session with graph
# if graph is optimized by tensorRT, then
# from tensorflow.contrib import tensorrt as trt
# gpu_ops = tf.GPUOptions(per_process_gpu_memory_fraction = 0.50)
gpu_ops = tf.GPUOptions()
'''
session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_ops)
'''
session_conf = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False,
gpu_options=gpu_ops,
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
sess = tf.Session(graph=graph, config=session_conf)
# mapping placeholders and tensors
p_is_train = graph.get_tensor_by_name('prefix/is_train:0')
p_sentence_length = graph.get_tensor_by_name('prefix/sentence_length:0')
p_input_data_pos_ids = graph.get_tensor_by_name('prefix/input_data_pos_ids:0')
p_input_data_chk_ids = graph.get_tensor_by_name('prefix/input_data_chk_ids:0')
p_input_data_word_ids = graph.get_tensor_by_name('prefix/input_data_word_ids:0')
p_input_data_wordchr_ids = graph.get_tensor_by_name('prefix/input_data_wordchr_ids:0')
t_logits_indices = graph.get_tensor_by_name('prefix/logits_indices:0')
t_sentence_lengths = graph.get_tensor_by_name('prefix/sentence_lengths:0')
num_buckets = 0
total_duration_time = 0.0
bucket = []
while 1:
try: line = sys.stdin.readline()
except KeyboardInterrupt: break
if not line: break
line = line.strip()
if not line and len(bucket) >= 1:
start_time = time.time()
# Build input data
inp = Input(bucket, config, build_output=False)
feed_dict = {p_input_data_pos_ids: inp.example['pos_ids'],
p_input_data_chk_ids: inp.example['chk_ids'],
p_is_train: False,
p_sentence_length: inp.max_sentence_length}
feed_dict[p_input_data_word_ids] = inp.example['word_ids']
feed_dict[p_input_data_wordchr_ids] = inp.example['wordchr_ids']
if 'elmo' in config.emb_class:
feed_dict[p_elmo_input_data_wordchr_ids] = inp.example['elmo_wordchr_ids']
if 'bert' in config.emb_class:
feed_dict[p_bert_input_data_token_ids] = inp.example['bert_token_ids']
feed_dict[p_bert_input_data_token_masks] = inp.example['bert_token_masks']
feed_dict[p_bert_input_data_segment_ids] = inp.example['bert_segment_ids']
if 'elmo' in config.emb_class:
feed_dict[p_bert_input_data_elmo_indices] = inp.example['bert_elmo_indices']
logits_indices, sentence_lengths = sess.run([t_logits_indices, t_sentence_lengths], feed_dict=feed_dict)
tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
for i in range(len(bucket)):
if 'bert' in config.emb_class:
j = inp.example['bert_wordidx2tokenidx'][0][i]
out = bucket[i] + ' ' + tags[j]
else:
out = bucket[i] + ' ' + tags[i]
sys.stdout.write(out + '\n')
sys.stdout.write('\n')
bucket = []
duration_time = time.time() - start_time
out = 'duration_time : ' + str(duration_time) + ' sec'
sys.stderr.write(out + '\n')
num_buckets += 1
total_duration_time += duration_time
if line : bucket.append(line)
if len(bucket) != 0:
start_time = time.time()
# Build input data
inp = Input(bucket, config, build_output=False)
        # Note: this frozen-graph script has no `model` object; use the placeholder tensors mapped above.
        feed_dict = {p_input_data_pos_ids: inp.example['pos_ids'],
                     p_input_data_chk_ids: inp.example['chk_ids'],
                     p_is_train: False,
                     p_sentence_length: inp.max_sentence_length}
        feed_dict[p_input_data_word_ids] = inp.example['word_ids']
        feed_dict[p_input_data_wordchr_ids] = inp.example['wordchr_ids']
        if 'elmo' in config.emb_class:
            feed_dict[p_elmo_input_data_wordchr_ids] = inp.example['elmo_wordchr_ids']
        if 'bert' in config.emb_class:
            feed_dict[p_bert_input_data_token_ids] = inp.example['bert_token_ids']
            feed_dict[p_bert_input_data_token_masks] = inp.example['bert_token_masks']
            feed_dict[p_bert_input_data_segment_ids] = inp.example['bert_segment_ids']
            if 'elmo' in config.emb_class:
                feed_dict[p_bert_input_data_elmo_indices] = inp.example['bert_elmo_indices']
logits_indices, sentence_lengths = sess.run([t_logits_indices, t_sentence_lengths], feed_dict=feed_dict)
tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
for i in range(len(bucket)):
if 'bert' in config.emb_class:
j = inp.example['bert_wordidx2tokenidx'][0][i]
out = bucket[i] + ' ' + tags[j]
else:
out = bucket[i] + ' ' + tags[i]
sys.stdout.write(out + '\n')
sys.stdout.write('\n')
duration_time = time.time() - start_time
out = 'duration_time : ' + str(duration_time) + ' sec'
tf.logging.info(out)
num_buckets += 1
total_duration_time += duration_time
out = 'total_duration_time : ' + str(total_duration_time) + ' sec' + '\n'
out += 'average processing time / bucket : ' + str(total_duration_time / num_buckets) + ' sec'
tf.logging.info(out)
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--emb_path', type=str, help='path to word embedding vector + vocab(.pkl)', required=True)
parser.add_argument('--wrd_dim', type=int, help='dimension of word embedding vector', required=True)
parser.add_argument('--word_length', type=int, default=15, help='max word length')
parser.add_argument('--frozen_path', type=str, help='path to frozen model(ex, ./exported/ner_frozen.pb)', required=True)
args = parser.parse_args()
tf.logging.set_verbosity(tf.logging.INFO)
args.restore = None
config = Config(args, is_training=False, emb_class='glove', use_crf=True)
inference(config, args.frozen_path)
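# A hedged usage sketch (file paths are placeholders, not taken from the original repo's docs):
#   python inference.py --emb_path ./embeddings/glove.840B.300d.pkl --wrd_dim 300 \
#       --frozen_path ./exported/ner_frozen.pb < input.txt > output.txt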
|
[
"hazzling@gmail.com"
] |
hazzling@gmail.com
|
8dba5286a903756c1d25fcc25d34a5e543f90741
|
ac42160440b161365c6f863bd1c89ce8a09570cb
|
/array.py
|
2eeeea45eeabb94107eafd1e45dbd55b677be5d0
|
[] |
no_license
|
vikask1640/Demogit
|
7f12b141af535f95373379a5469573d9c69ad461
|
ad35dcab54b56afbc75578711dac78204ad008b0
|
refs/heads/master
| 2020-04-13T23:32:33.126507
| 2019-01-03T11:00:56
| 2019-01-03T11:00:56
| 163,509,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
import array as a
def ace():
vals = a.array("i", [10, 8, 14, 55, 4])
print(vals)
x=list(vals)
x.sort()
print(x)
ace()
# factorial of a number
y=5
fact=1
for j in range(1,y+1): # 1 to 5
fact=fact*j
print(fact)
|
[
"noreply@github.com"
] |
vikask1640.noreply@github.com
|
3921c679f0f414668848b1d37b5d076edec45b8d
|
5d1441cc173e06fb24c389eb812067a3fc355587
|
/workflow/templatetags/custom.py
|
c9f466a6cebfba8a0db9f818d8958efed3756c15
|
[] |
no_license
|
Johums/ProjectManage
|
2860eb12134d9b522c5a5f2fa4e4054533d9175a
|
22d662e089adab447f247d078c89c670384e78ff
|
refs/heads/master
| 2021-01-10T16:36:47.412675
| 2016-02-27T15:24:13
| 2016-02-27T15:24:13
| 52,213,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
# -*- coding: utf-8 -*-
from django import template
from django.utils import safestring
register = template.Library()
@register.filter
def multi_menu(menu, move=0):
html_content = """
<ul class="nav nav-pills nav-stacked" style="margin-left: %spx" >
""" % move
for k, v in menu.items():
html_content += """
<li data-toggle="collapse" data-target=".demo">
<a href="{1}"><small>{2}</small></a>
</li>
""".format(*k)
if v:
html_content += multi_menu(v, move + 20)
html_content += """
</ul>
"""
return safestring.mark_safe(html_content)
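# Assumed input shape (inferred from the {1}/{2} format indices above, not documented in the
# original repo): `menu` maps (id, url, label) tuples to dicts of sub-menus, e.g.
# menu = {(1, '/projects/', 'Projects'): {(2, '/projects/new/', 'New'): {}}}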
|
[
"13276915582@163.com"
] |
13276915582@163.com
|
7048a4f70ae9aad9393bb2928bb43fcb1d64edb8
|
8c0bf198a6e0be4128a8615a6944f0a167fc9c79
|
/options.py
|
0cc3ba14fb313fce757adb11cbbf48670d4e23ef
|
[] |
no_license
|
amietn/anna
|
73c43d3a2a9d7f465784ec7adc54f9494b07a178
|
e6e24d2c8252085e8ed69df8da976360b0f43baf
|
refs/heads/master
| 2021-05-03T06:12:09.783335
| 2018-02-07T09:09:18
| 2018-02-07T09:09:18
| 120,590,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
#!/usr/bin/env python3
import os
import json
irc_credentials_path = os.path.expanduser("~/.config/anna/irc_credentials.json")
def get_irc_credentials():
return get_irc_credentials_path(irc_credentials_path)
def get_irc_credentials_path(path):
with open(path, 'r') as f:
j = json.load(f)
return j
if __name__ == '__main__':
creds = get_irc_credentials_path("irc_credentials.json.template")
print(creds)
|
[
"amietn@foobar"
] |
amietn@foobar
|