Dataset schema (column, dtype, observed lengths / value counts):

| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-281 |
| content_id | string | length 40 |
| detected_licenses | list | 0-57 items |
| license_type | string | 2 classes |
| repo_name | string | length 6-116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable (⌀) |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | length 2-6.02M |
| authors | list | 1 item |
| author | string | length 0-175 |
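For orientation, a minimal sketch of iterating records with this schema via the Hugging Face `datasets` library. The dataset identifier below is a hypothetical placeholder, since this dump does not name its source dataset.

```python
from datasets import load_dataset

# "org/stack-like-dataset" is a hypothetical placeholder, not a real id.
ds = load_dataset("org/stack-like-dataset", split="train", streaming=True)

for row in ds:
    # Every row pairs provenance metadata with the raw file text.
    print(row["repo_name"], row["path"], row["length_bytes"])
    if row["language"] == "Python" and not row["is_vendor"]:
        source = row["content"]  # full file contents as a string
        break
```
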
Record 1: /venv/Scripts/pip3-script.py (Abhishek-Bhardwaj123/myJarvis)

| field | value |
|---|---|
| blob_id | 395dd32b36ef6f772a2dc6cf1c9e19d74b164ab5 |
| directory_id | f3ac5816c1a7ba9d36c0673585271a1eaadf23c5 |
| path | /venv/Scripts/pip3-script.py |
| content_id | 8f255ad495f3e332027192052761abc2cc3fed45 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | Abhishek-Bhardwaj123/myJarvis |
| snapshot_id | 47265036b7ffd8de9b32be4508214e381dacee9c |
| revision_id | b654ecd300cdbb4e0f284c11b589de96d794b9f8 |
| branch_name | refs/heads/master |
| visit_date | 2023-01-11T17:23:13.482325 |
| revision_date | 2020-11-11T14:27:27 |
| committer_date | 2020-11-11T14:27:27 |
| github_id | 310,311,082 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 413 |
| extension | py |
| authors | ["akkucoder123@gmail.com"] |
| author | akkucoder123@gmail.com |

content:

```python
#!C:\Users\akku\PycharmProjects\myJarvis\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
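    # Strip the "-script.py" / ".exe" suffix that setuptools appends to
    # Windows console-script wrappers, so pip sees a clean name in argv[0].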
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
```

Record 2: /sympy/integrals/rubi/rules/exponential.py (cosmosZhou/sagemath)

| field | value |
|---|---|
| blob_id | 6761e8fe9dbbdf0919c1045ac3ee86a999c1cea5 |
| directory_id | bc6492a9a30ac7228caad91643d58653b49ab9e3 |
| path | /sympy/integrals/rubi/rules/exponential.py |
| content_id | 90b28d15cf3a1c0f5509ed7a20ff2220e061a60f |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | cosmosZhou/sagemath |
| snapshot_id | 2c54ea04868882340c7ef981b7f499fb205095c9 |
| revision_id | 0608b946174e86182c6d35d126cd89d819d1d0b8 |
| branch_name | refs/heads/master |
| visit_date | 2023-01-06T07:31:37.546716 |
| revision_date | 2020-11-12T06:39:22 |
| committer_date | 2020-11-12T06:39:22 |
| github_id | 311,177,322 |
| star_events_count | 1 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | 2020-11-12T06:09:11 |
| gha_created_at | 2020-11-08T23:42:40 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 67,083 |
| extension | py |

content:

```python
'''
This code is automatically generated. Never edit it manually.
For details of generating the code see `rubi_parsing_guide.md` in `parsetools`.
'''
from sympy.external import import_module
matchpy = import_module("matchpy")
from sympy.utilities.decorator import doctest_depends_on
if matchpy:
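    # All rubi utilities, wildcards, and rule definitions below depend on
    # matchpy; the guarded block runs only when matchpy is importable.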
from matchpy import Pattern, ReplacementRule, CustomConstraint, is_match
from sympy.integrals.rubi.utility_function import (
Int, Sum, Set, With, Module, Scan, MapAnd, FalseQ,
ZeroQ, NegativeQ, NonzeroQ, FreeQ, NFreeQ, List, Log, PositiveQ,
PositiveIntegerQ, NegativeIntegerQ, IntegerQ, IntegersQ,
ComplexNumberQ, PureComplexNumberQ, RealNumericQ, PositiveOrZeroQ,
NegativeOrZeroQ, FractionOrNegativeQ, NegQ, Equal, Unequal, IntPart,
FracPart, RationalQ, ProductQ, SumQ, NonsumQ, Subst, First, Rest,
SqrtNumberQ, SqrtNumberSumQ, LinearQ, Sqrt, ArcCosh, Coefficient,
Denominator, Hypergeometric2F1, Not, Simplify, FractionalPart,
IntegerPart, AppellF1, EllipticPi, EllipticE, EllipticF, ArcTan,
ArcCot, ArcCoth, ArcTanh, ArcSin, ArcSinh, ArcCos, ArcCsc, ArcSec,
ArcCsch, ArcSech, Sinh, Tanh, Cosh, Sech, Csch, Coth, LessEqual, Less,
Greater, GreaterEqual, FractionQ, IntLinearcQ, Expand, IndependentQ,
PowerQ, IntegerPowerQ, PositiveIntegerPowerQ, FractionalPowerQ, AtomQ,
ExpQ, LogQ, Head, MemberQ, TrigQ, SinQ, CosQ, TanQ, CotQ, SecQ, CscQ,
Sin, Cos, Tan, Cot, Sec, Csc, HyperbolicQ, SinhQ, CoshQ, TanhQ, CothQ,
SechQ, CschQ, InverseTrigQ, SinCosQ, SinhCoshQ, LeafCount, Numerator,
NumberQ, NumericQ, Length, ListQ, Im, Re, InverseHyperbolicQ,
InverseFunctionQ, TrigHyperbolicFreeQ, InverseFunctionFreeQ, RealQ,
EqQ, FractionalPowerFreeQ, ComplexFreeQ, PolynomialQ, FactorSquareFree,
PowerOfLinearQ, Exponent, QuadraticQ, LinearPairQ, BinomialParts,
TrinomialParts, PolyQ, EvenQ, OddQ, PerfectSquareQ, NiceSqrtAuxQ,
NiceSqrtQ, Together, PosAux, PosQ, CoefficientList, ReplaceAll,
ExpandLinearProduct, GCD, ContentFactor, NumericFactor,
NonnumericFactors, MakeAssocList, GensymSubst, KernelSubst,
ExpandExpression, Apart, SmartApart, MatchQ,
PolynomialQuotientRemainder, FreeFactors, NonfreeFactors,
RemoveContentAux, RemoveContent, FreeTerms, NonfreeTerms,
ExpandAlgebraicFunction, CollectReciprocals, ExpandCleanup,
AlgebraicFunctionQ, Coeff, LeadTerm, RemainingTerms, LeadFactor,
RemainingFactors, LeadBase, LeadDegree, Numer, Denom, hypergeom, Expon,
MergeMonomials, PolynomialDivide, BinomialQ, TrinomialQ,
GeneralizedBinomialQ, GeneralizedTrinomialQ, FactorSquareFreeList,
PerfectPowerTest, SquareFreeFactorTest, RationalFunctionQ,
RationalFunctionFactors, NonrationalFunctionFactors, Reverse,
RationalFunctionExponents, RationalFunctionExpand, ExpandIntegrand,
SimplerQ, SimplerSqrtQ, SumSimplerQ, BinomialDegree, TrinomialDegree,
CancelCommonFactors, SimplerIntegrandQ, GeneralizedBinomialDegree,
GeneralizedBinomialParts, GeneralizedTrinomialDegree,
GeneralizedTrinomialParts, MonomialQ, MonomialSumQ,
MinimumMonomialExponent, MonomialExponent, LinearMatchQ,
PowerOfLinearMatchQ, QuadraticMatchQ, CubicMatchQ, BinomialMatchQ,
TrinomialMatchQ, GeneralizedBinomialMatchQ, GeneralizedTrinomialMatchQ,
QuotientOfLinearsMatchQ, PolynomialTermQ, PolynomialTerms,
NonpolynomialTerms, PseudoBinomialParts, NormalizePseudoBinomial,
PseudoBinomialPairQ, PseudoBinomialQ, PolynomialGCD, PolyGCD,
AlgebraicFunctionFactors, NonalgebraicFunctionFactors,
QuotientOfLinearsP, QuotientOfLinearsParts, QuotientOfLinearsQ,
Flatten, Sort, AbsurdNumberQ, AbsurdNumberFactors,
NonabsurdNumberFactors, SumSimplerAuxQ, Prepend, Drop,
CombineExponents, FactorInteger, FactorAbsurdNumber,
SubstForInverseFunction, SubstForFractionalPower,
SubstForFractionalPowerOfQuotientOfLinears,
FractionalPowerOfQuotientOfLinears, SubstForFractionalPowerQ,
SubstForFractionalPowerAuxQ, FractionalPowerOfSquareQ,
FractionalPowerSubexpressionQ, Apply, FactorNumericGcd,
MergeableFactorQ, MergeFactor, MergeFactors, TrigSimplifyQ,
TrigSimplify, TrigSimplifyRecur, Order, FactorOrder, Smallest,
OrderedQ, MinimumDegree, PositiveFactors, Sign, NonpositiveFactors,
PolynomialInAuxQ, PolynomialInQ, ExponentInAux, ExponentIn,
PolynomialInSubstAux, PolynomialInSubst, Distrib, DistributeDegree,
FunctionOfPower, DivideDegreesOfFactors, MonomialFactor, FullSimplify,
FunctionOfLinearSubst, FunctionOfLinear, NormalizeIntegrand,
NormalizeIntegrandAux, NormalizeIntegrandFactor,
NormalizeIntegrandFactorBase, NormalizeTogether,
NormalizeLeadTermSigns, AbsorbMinusSign, NormalizeSumFactors,
SignOfFactor, NormalizePowerOfLinear, SimplifyIntegrand, SimplifyTerm,
TogetherSimplify, SmartSimplify, SubstForExpn, ExpandToSum, UnifySum,
UnifyTerms, UnifyTerm, CalculusQ, FunctionOfInverseLinear,
PureFunctionOfSinhQ, PureFunctionOfTanhQ, PureFunctionOfCoshQ,
IntegerQuotientQ, OddQuotientQ, EvenQuotientQ, FindTrigFactor,
FunctionOfSinhQ, FunctionOfCoshQ, OddHyperbolicPowerQ, FunctionOfTanhQ,
FunctionOfTanhWeight, FunctionOfHyperbolicQ, SmartNumerator,
SmartDenominator, SubstForAux, ActivateTrig, ExpandTrig, TrigExpand,
SubstForTrig, SubstForHyperbolic, InertTrigFreeQ, LCM,
SubstForFractionalPowerOfLinear, FractionalPowerOfLinear,
InverseFunctionOfLinear, InertTrigQ, InertReciprocalQ, DeactivateTrig,
FixInertTrigFunction, DeactivateTrigAux, PowerOfInertTrigSumQ,
PiecewiseLinearQ, KnownTrigIntegrandQ, KnownSineIntegrandQ,
KnownTangentIntegrandQ, KnownCotangentIntegrandQ,
KnownSecantIntegrandQ, TryPureTanSubst, TryTanhSubst, TryPureTanhSubst,
AbsurdNumberGCD, AbsurdNumberGCDList, ExpandTrigExpand,
ExpandTrigReduce, ExpandTrigReduceAux, NormalizeTrig, TrigToExp,
ExpandTrigToExp, TrigReduce, FunctionOfTrig, AlgebraicTrigFunctionQ,
FunctionOfHyperbolic, FunctionOfQ, FunctionOfExpnQ, PureFunctionOfSinQ,
PureFunctionOfCosQ, PureFunctionOfTanQ, PureFunctionOfCotQ,
FunctionOfCosQ, FunctionOfSinQ, OddTrigPowerQ, FunctionOfTanQ,
FunctionOfTanWeight, FunctionOfTrigQ, FunctionOfDensePolynomialsQ,
FunctionOfLog, PowerVariableExpn, PowerVariableDegree,
PowerVariableSubst, EulerIntegrandQ, FunctionOfSquareRootOfQuadratic,
SquareRootOfQuadraticSubst, Divides, EasyDQ, ProductOfLinearPowersQ,
Rt, NthRoot, AtomBaseQ, SumBaseQ, NegSumBaseQ, AllNegTermQ,
SomeNegTermQ, TrigSquareQ, RtAux, TrigSquare, IntSum, IntTerm, Map2,
ConstantFactor, SameQ, ReplacePart, CommonFactors,
MostMainFactorPosition, FunctionOfExponentialQ, FunctionOfExponential,
FunctionOfExponentialFunction, FunctionOfExponentialFunctionAux,
FunctionOfExponentialTest, FunctionOfExponentialTestAux, stdev,
rubi_test, If, IntQuadraticQ, IntBinomialQ, RectifyTangent,
RectifyCotangent, Inequality, Condition, Simp, SimpHelp, SplitProduct,
SplitSum, SubstFor, SubstForAux, FresnelS, FresnelC, Erfc, Erfi, Gamma,
FunctionOfTrigOfLinearQ, ElementaryFunctionQ, Complex, UnsameQ,
_SimpFixFactor, SimpFixFactor, _FixSimplify, FixSimplify,
_SimplifyAntiderivativeSum, SimplifyAntiderivativeSum,
_SimplifyAntiderivative, SimplifyAntiderivative, _TrigSimplifyAux,
TrigSimplifyAux, Cancel, Part, PolyLog, D, Dist, Sum_doit, PolynomialQuotient, Floor,
PolynomialRemainder, Factor, PolyLog, CosIntegral, SinIntegral, LogIntegral, SinhIntegral,
        CoshIntegral, Rule, Erf, PolyGamma, ExpIntegralEi, ExpIntegralE, LogGamma, UtilityOperator, Factorial,
Zeta, ProductLog, DerivativeDivides, HypergeometricPFQ, IntHide, OneQ, Null, rubi_exp as exp, rubi_log as log, Discriminant,
Negative, Quotient
)
from sympy import (Integral, S, sqrt, And, Or, Integer, Float, Mod, I, Abs, simplify, Mul,
Add, Pow, sign, EulerGamma)
from sympy.integrals.rubi.symbol import WC
from sympy.core.symbol import symbols, Symbol
from sympy.functions import (sin, cos, tan, cot, csc, sec, sqrt, erf)
from sympy.functions.elementary.hyperbolic import (acosh, asinh, atanh, acoth, acsch, asech, cosh, sinh, tanh, coth, sech, csch)
from sympy.functions.elementary.trigonometric import (atan, acsc, asin, acot, acos, asec, atan2)
from sympy import pi as Pi
A_, B_, C_, F_, G_, H_, a_, b_, c_, d_, e_, f_, g_, h_, i_, j_, k_, l_, m_, n_, p_, q_, r_, t_, u_, v_, s_, w_, x_, y_, z_ = [WC(i) for i in 'ABCFGHabcdefghijklmnpqrtuvswxyz']
a1_, a2_, b1_, b2_, c1_, c2_, d1_, d2_, n1_, n2_, e1_, e2_, f1_, f2_, g1_, g2_, n1_, n2_, n3_, Pq_, Pm_, Px_, Qm_, Qr_, Qx_, jn_, mn_, non2_, RFx_, RGx_ = [WC(i) for i in ['a1', 'a2', 'b1', 'b2', 'c1', 'c2', 'd1', 'd2', 'n1', 'n2', 'e1', 'e2', 'f1', 'f2', 'g1', 'g2', 'n1', 'n2', 'n3', 'Pq', 'Pm', 'Px', 'Qm', 'Qr', 'Qx', 'jn', 'mn', 'non2', 'RFx', 'RGx']]
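    # WC('name', default) is a wildcard with a default value: a pattern such as
    # x_*WC('f', S(1)) + WC('e', S(0)) also matches bare x, with f falling back
    # to 1 and e to 0.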
    i, ii, Pqq, Q, R, r, C, k, u = symbols('i ii Pqq Q R r C k u')
_UseGamma = False
ShowSteps = False
StepCounter = None
def exponential(rubi):
from sympy.integrals.rubi.constraints import cons31, cons168, cons515, cons1098, cons1099, cons3, cons7, cons27, cons48, cons125, cons208, cons4, cons94, cons17, cons18, cons21, cons1100, cons128, cons2, cons244, cons137, cons552, cons1101, cons1102, cons5, cons380, cons54, cons1103, cons1104, cons1105, cons209, cons224, cons796, cons797, cons50, cons1106, cons804, cons1107, cons812, cons1108, cons1109, cons1110, cons1111, cons584, cons1112, cons1113, cons479, cons480, cons1114, cons196, cons23, cons1115, cons53, cons1116, cons1117, cons1118, cons1119, cons85, cons1120, cons356, cons531, cons1121, cons1122, cons535, cons93, cons1123, cons1124, cons176, cons367, cons166, cons744, cons68, cons840, cons1125, cons1126, cons1127, cons25, cons71, cons1128, cons1129, cons1130, cons818, cons1131, cons1132, cons1133, cons1134, cons819, cons1135, cons1136, cons1137, cons1138, cons148, cons810, cons811, cons1139, cons1140, cons52, cons800, cons1141, cons1142, cons1143, cons813, cons1144, cons226, cons62, cons1145, cons1146, cons1147, cons1148, cons1149, cons1150, cons1151, cons463, cons1152, cons43, cons448, cons1153, cons1154, cons1155, cons1017
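    # Each integration rule below is a triple: a matchpy Pattern whose cons*
    # constraints restrict the free symbols, a replacement function that
    # returns the antiderivative (built from Dist/Simp/Int), and a
    # ReplacementRule binding the two.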
pattern1901 = Pattern(Integral((F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1)))*WC('b', S(1)))**WC('n', S(1))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons3, cons7, cons27, cons48, cons125, cons208, cons4, cons31, cons168, cons515, cons1098)
def replacement1901(m, f, g, b, d, c, n, x, F, e):
rubi.append(1901)
return -Dist(d*m/(f*g*n*log(F)), Int((F**(g*(e + f*x))*b)**n*(c + d*x)**(m + S(-1)), x), x) + Simp((F**(g*(e + f*x))*b)**n*(c + d*x)**m/(f*g*n*log(F)), x)
rule1901 = ReplacementRule(pattern1901, replacement1901)
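    # rule1901 lowers the power m of (c + d*x)**m by one against the
    # exponential factor (integration by parts); rule1902 is the companion
    # reduction that raises m instead.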
pattern1902 = Pattern(Integral((F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1)))*WC('b', S(1)))**WC('n', S(1))*(x_*WC('d', S(1)) + WC('c', S(0)))**m_, x_), cons1099, cons3, cons7, cons27, cons48, cons125, cons208, cons4, cons31, cons94, cons515, cons1098)
def replacement1902(m, f, g, b, d, c, n, x, F, e):
rubi.append(1902)
return -Dist(f*g*n*log(F)/(d*(m + S(1))), Int((F**(g*(e + f*x))*b)**n*(c + d*x)**(m + S(1)), x), x) + Simp((F**(g*(e + f*x))*b)**n*(c + d*x)**(m + S(1))/(d*(m + S(1))), x)
rule1902 = ReplacementRule(pattern1902, replacement1902)
pattern1903 = Pattern(Integral(F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1)))/(x_*WC('d', S(1)) + WC('c', S(0))), x_), cons1099, cons7, cons27, cons48, cons125, cons208, cons1098)
def replacement1903(f, g, d, c, x, F, e):
rubi.append(1903)
return Simp(F**(g*(-c*f/d + e))*ExpIntegralEi(f*g*(c + d*x)*log(F)/d)/d, x)
rule1903 = ReplacementRule(pattern1903, replacement1903)
pattern1904 = Pattern(Integral(F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1)))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons7, cons27, cons48, cons125, cons208, cons17)
def replacement1904(m, f, g, d, c, x, F, e):
rubi.append(1904)
return Simp(F**(g*(-c*f/d + e))*f**(-m + S(-1))*g**(-m + S(-1))*(-d)**m*Gamma(m + S(1), -f*g*(c + d*x)*log(F)/d)*log(F)**(-m + S(-1)), x)
rule1904 = ReplacementRule(pattern1904, replacement1904)
pattern1905 = Pattern(Integral(F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1)))/sqrt(x_*WC('d', S(1)) + WC('c', S(0))), x_), cons1099, cons7, cons27, cons48, cons125, cons208, cons1098)
def replacement1905(f, g, d, c, x, F, e):
rubi.append(1905)
return Dist(S(2)/d, Subst(Int(F**(g*(-c*f/d + e) + f*g*x**S(2)/d), x), x, sqrt(c + d*x)), x)
rule1905 = ReplacementRule(pattern1905, replacement1905)
pattern1906 = Pattern(Integral(F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1)))*(x_*WC('d', S(1)) + WC('c', S(0)))**m_, x_), cons1099, cons7, cons27, cons48, cons125, cons208, cons21, cons18)
def replacement1906(m, f, g, d, c, x, F, e):
rubi.append(1906)
return -Simp(F**(g*(-c*f/d + e))*(-f*g*log(F)/d)**(-IntPart(m) + S(-1))*(-f*g*(c + d*x)*log(F)/d)**(-FracPart(m))*(c + d*x)**FracPart(m)*Gamma(m + S(1), -f*g*(c + d*x)*log(F)/d)/d, x)
rule1906 = ReplacementRule(pattern1906, replacement1906)
pattern1907 = Pattern(Integral((F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1)))*WC('b', S(1)))**n_*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons3, cons7, cons27, cons48, cons125, cons208, cons21, cons4, cons1100)
def replacement1907(m, f, g, b, d, c, n, x, F, e):
rubi.append(1907)
return Dist(F**(-g*n*(e + f*x))*(F**(g*(e + f*x))*b)**n, Int(F**(g*n*(e + f*x))*(c + d*x)**m, x), x)
rule1907 = ReplacementRule(pattern1907, replacement1907)
pattern1908 = Pattern(Integral((a_ + (F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)))**WC('p', S(1))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons21, cons4, cons128)
def replacement1908(p, m, f, g, b, d, c, n, a, x, F, e):
rubi.append(1908)
return Int(ExpandIntegrand((c + d*x)**m, (a + b*(F**(g*(e + f*x)))**n)**p, x), x)
rule1908 = ReplacementRule(pattern1908, replacement1908)
pattern1909 = Pattern(Integral((x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1))/(a_ + (F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons4, cons31, cons168)
def replacement1909(m, f, g, b, d, c, n, a, x, F, e):
rubi.append(1909)
return Dist(d*m/(a*f*g*n*log(F)), Int((c + d*x)**(m + S(-1))*log(a*(F**(g*(e + f*x)))**(-n)/b + S(1)), x), x) - Simp((c + d*x)**m*log(a*(F**(g*(e + f*x)))**(-n)/b + S(1))/(a*f*g*n*log(F)), x)
rule1909 = ReplacementRule(pattern1909, replacement1909)
def With1910(p, m, f, g, b, d, c, n, a, x, F, e):
u = IntHide((a + b*(F**(g*(e + f*x)))**n)**p, x)
rubi.append(1910)
return -Dist(d*m, Int(u*(c + d*x)**(m + S(-1)), x), x) + Dist((c + d*x)**m, u, x)
pattern1910 = Pattern(Integral((a_ + (F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)))**p_*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons4, cons244, cons168, cons137)
rule1910 = ReplacementRule(pattern1910, With1910)
pattern1911 = Pattern(Integral(u_**WC('m', S(1))*((F_**(v_*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1)), x_), cons1099, cons2, cons3, cons208, cons4, cons5, cons552, cons1101, cons1102, cons17)
def replacement1911(v, p, u, m, g, b, a, n, x, F):
rubi.append(1911)
return Int((a + b*(F**(g*ExpandToSum(v, x)))**n)**p*NormalizePowerOfLinear(u, x)**m, x)
rule1911 = ReplacementRule(pattern1911, replacement1911)
def With1912(v, p, u, m, g, b, a, n, x, F):
uu = NormalizePowerOfLinear(u, x)
z = Symbol('z')
        z = If(And(PowerQ(uu), FreeQ(Part(uu, S(2)), x)), Part(uu, S(1))**(m*Part(uu, S(2))), uu**m)
return Simp(uu**m*Int(z*(a + b*(F**(g*ExpandToSum(v, x)))**n)**p, x)/z, x)
pattern1912 = Pattern(Integral(u_**WC('m', S(1))*((F_**(v_*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1)), x_), cons1099, cons2, cons3, cons208, cons21, cons4, cons5, cons552, cons1101, cons1102, cons18)
rule1912 = ReplacementRule(pattern1912, With1912)
pattern1913 = Pattern(Integral((a_ + (F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)))**WC('p', S(1))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons21, cons4, cons5, cons380)
def replacement1913(p, m, f, g, b, d, c, n, a, x, F, e):
rubi.append(1913)
return Int((a + b*(F**(g*(e + f*x)))**n)**p*(c + d*x)**m, x)
rule1913 = ReplacementRule(pattern1913, replacement1913)
pattern1914 = Pattern(Integral((x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1))*(F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))/(a_ + (F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons4, cons31, cons168)
def replacement1914(m, f, g, b, d, c, n, a, x, F, e):
rubi.append(1914)
return -Dist(d*m/(b*f*g*n*log(F)), Int((c + d*x)**(m + S(-1))*log(S(1) + b*(F**(g*(e + f*x)))**n/a), x), x) + Simp((c + d*x)**m*log(S(1) + b*(F**(g*(e + f*x)))**n/a)/(b*f*g*n*log(F)), x)
rule1914 = ReplacementRule(pattern1914, replacement1914)
pattern1915 = Pattern(Integral((x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1))*((F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1))*(F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons21, cons4, cons5, cons54)
def replacement1915(p, m, f, g, b, d, a, n, c, x, F, e):
rubi.append(1915)
return -Dist(d*m/(b*f*g*n*(p + S(1))*log(F)), Int((a + b*(F**(g*(e + f*x)))**n)**(p + S(1))*(c + d*x)**(m + S(-1)), x), x) + Simp((a + b*(F**(g*(e + f*x)))**n)**(p + S(1))*(c + d*x)**m/(b*f*g*n*(p + S(1))*log(F)), x)
rule1915 = ReplacementRule(pattern1915, replacement1915)
pattern1916 = Pattern(Integral((x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1))*((F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1))*(F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons21, cons4, cons5, cons1103)
def replacement1916(p, m, f, g, b, d, a, n, c, x, F, e):
rubi.append(1916)
return Int((a + b*(F**(g*(e + f*x)))**n)**p*(c + d*x)**m*(F**(g*(e + f*x)))**n, x)
rule1916 = ReplacementRule(pattern1916, replacement1916)
pattern1917 = Pattern(Integral((G_**((x_*WC('i', S(1)) + WC('h', S(0)))*WC('j', S(1)))*WC('k', S(1)))**WC('q', S(1))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1))*((F_**((x_*WC('f', S(1)) + WC('e', S(0)))*WC('g', S(1))))**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons224, cons796, cons797, cons21, cons4, cons5, cons50, cons1104, cons1105)
def replacement1917(p, j, k, m, f, g, b, i, d, G, a, n, c, x, h, q, e, F):
rubi.append(1917)
return Dist((G**(j*(h + i*x))*k)**q*(F**(g*(e + f*x)))**(-n), Int((a + b*(F**(g*(e + f*x)))**n)**p*(c + d*x)**m*(F**(g*(e + f*x)))**n, x), x)
rule1917 = ReplacementRule(pattern1917, replacement1917)
pattern1918 = Pattern(Integral((F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('c', S(1))))**WC('n', S(1)), x_), cons1099, cons2, cons3, cons7, cons4, cons1106)
def replacement1918(b, c, n, a, x, F):
rubi.append(1918)
return Simp((F**(c*(a + b*x)))**n/(b*c*n*log(F)), x)
rule1918 = ReplacementRule(pattern1918, replacement1918)
pattern1919 = Pattern(Integral(F_**(v_*WC('c', S(1)))*u_, x_), cons1099, cons7, cons804, cons552, cons1107)
def replacement1919(v, u, c, x, F):
rubi.append(1919)
return Int(ExpandIntegrand(F**(c*ExpandToSum(v, x))*u, x), x)
rule1919 = ReplacementRule(pattern1919, replacement1919)
pattern1920 = Pattern(Integral(F_**(v_*WC('c', S(1)))*u_, x_), cons1099, cons7, cons804, cons552, cons1098)
def replacement1920(v, u, c, x, F):
rubi.append(1920)
return Int(ExpandIntegrand(F**(c*ExpandToSum(v, x)), u, x), x)
rule1920 = ReplacementRule(pattern1920, replacement1920)
pattern1921 = Pattern(Integral(F_**(v_*WC('c', S(1)))*u_**WC('m', S(1))*w_, x_), cons1099, cons7, cons21, cons812, cons1108)
def replacement1921(v, w, u, m, c, x, F):
rubi.append(1921)
return Simp(F**(c*v)*u**(m + S(1))*Coefficient(w, x, S(1))/(c*Coefficient(u, x, S(1))*Coefficient(v, x, S(1))*log(F)), x)
rule1921 = ReplacementRule(pattern1921, replacement1921)
pattern1922 = Pattern(Integral(F_**(v_*WC('c', S(1)))*u_**WC('m', S(1))*w_, x_), cons1099, cons7, cons1109, cons552, cons1101, cons17, cons1107)
def replacement1922(v, w, u, m, c, x, F):
rubi.append(1922)
return Int(ExpandIntegrand(F**(c*ExpandToSum(v, x))*w*NormalizePowerOfLinear(u, x)**m, x), x)
rule1922 = ReplacementRule(pattern1922, replacement1922)
pattern1923 = Pattern(Integral(F_**(v_*WC('c', S(1)))*u_**WC('m', S(1))*w_, x_), cons1099, cons7, cons1109, cons552, cons1101, cons17, cons1098)
def replacement1923(v, w, u, m, c, x, F):
rubi.append(1923)
return Int(ExpandIntegrand(F**(c*ExpandToSum(v, x)), w*NormalizePowerOfLinear(u, x)**m, x), x)
rule1923 = ReplacementRule(pattern1923, replacement1923)
def With1924(v, w, u, m, c, x, F):
uu = NormalizePowerOfLinear(u, x)
z = Symbol('z')
        z = If(And(PowerQ(uu), FreeQ(Part(uu, S(2)), x)), Part(uu, S(1))**(m*Part(uu, S(2))), uu**m)
return Simp(uu**m*Int(ExpandIntegrand(F**(c*ExpandToSum(v, x))*w*z, x), x)/z, x)
pattern1924 = Pattern(Integral(F_**(v_*WC('c', S(1)))*u_**WC('m', S(1))*w_, x_), cons1099, cons7, cons21, cons1109, cons552, cons1101, cons18)
rule1924 = ReplacementRule(pattern1924, With1924)
pattern1925 = Pattern(Integral(F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('c', S(1)))*(e_ + (x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1))*log(x_*WC('d', S(1))))*log(x_*WC('d', S(1)))**WC('n', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons4, cons1110, cons1111, cons584)
def replacement1925(f, b, g, d, c, n, a, x, h, F, e):
rubi.append(1925)
return Simp(F**(c*(a + b*x))*e*x*log(d*x)**(n + S(1))/(n + S(1)), x)
rule1925 = ReplacementRule(pattern1925, replacement1925)
pattern1926 = Pattern(Integral(F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('c', S(1)))*x_**WC('m', S(1))*(e_ + (x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1))*log(x_*WC('d', S(1))))*log(x_*WC('d', S(1)))**WC('n', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons21, cons4, cons1112, cons1111, cons584)
def replacement1926(m, f, b, g, d, c, n, a, x, h, F, e):
rubi.append(1926)
return Simp(F**(c*(a + b*x))*e*x**(m + S(1))*log(d*x)**(n + S(1))/(n + S(1)), x)
rule1926 = ReplacementRule(pattern1926, replacement1926)
pattern1927 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons1113)
def replacement1927(b, d, c, a, x, F):
rubi.append(1927)
return Simp(F**(a + b*(c + d*x))/(b*d*log(F)), x)
rule1927 = ReplacementRule(pattern1927, replacement1927)
pattern1928 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**S(2)*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons479)
def replacement1928(b, d, c, a, x, F):
rubi.append(1928)
return Simp(F**a*sqrt(Pi)*Erfi((c + d*x)*Rt(b*log(F), S(2)))/(S(2)*d*Rt(b*log(F), S(2))), x)
rule1928 = ReplacementRule(pattern1928, replacement1928)
pattern1929 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**S(2)*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons480)
def replacement1929(b, d, c, a, x, F):
rubi.append(1929)
return Simp(F**a*sqrt(Pi)*Erf((c + d*x)*Rt(-b*log(F), S(2)))/(S(2)*d*Rt(-b*log(F), S(2))), x)
rule1929 = ReplacementRule(pattern1929, replacement1929)
pattern1930 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons1114, cons196)
def replacement1930(b, d, c, a, n, x, F):
rubi.append(1930)
return -Dist(b*n*log(F), Int(F**(a + b*(c + d*x)**n)*(c + d*x)**n, x), x) + Simp(F**(a + b*(c + d*x)**n)*(c + d*x)/d, x)
rule1930 = ReplacementRule(pattern1930, replacement1930)
def With1931(b, d, c, a, n, x, F):
k = Denominator(n)
rubi.append(1931)
return Dist(k/d, Subst(Int(F**(a + b*x**(k*n))*x**(k + S(-1)), x), x, (c + d*x)**(S(1)/k)), x)
pattern1931 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons1114, cons23)
rule1931 = ReplacementRule(pattern1931, With1931)
pattern1932 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons4, cons1115)
def replacement1932(b, d, c, a, n, x, F):
rubi.append(1932)
return -Simp(F**a*(-b*(c + d*x)**n*log(F))**(-S(1)/n)*(c + d*x)*Gamma(S(1)/n, -b*(c + d*x)**n*log(F))/(d*n), x)
rule1932 = ReplacementRule(pattern1932, replacement1932)
pattern1933 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('f', S(1)) + WC('e', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons4, cons53, cons1116)
def replacement1933(m, f, b, d, c, a, n, x, F, e):
rubi.append(1933)
return Simp(F**(a + b*(c + d*x)**n)*(c + d*x)**(-n)*(e + f*x)**n/(b*f*n*log(F)), x)
rule1933 = ReplacementRule(pattern1933, replacement1933)
pattern1934 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))/(x_*WC('f', S(1)) + WC('e', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons4, cons1116)
def replacement1934(f, b, d, c, a, n, x, F, e):
rubi.append(1934)
return Simp(F**a*ExpIntegralEi(b*(c + d*x)**n*log(F))/(f*n), x)
rule1934 = ReplacementRule(pattern1934, replacement1934)
pattern1935 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons21, cons4, cons1117)
def replacement1935(m, b, d, c, a, n, x, F):
rubi.append(1935)
return Dist(S(1)/(d*(m + S(1))), Subst(Int(F**(a + b*x**S(2)), x), x, (c + d*x)**(m + S(1))), x)
rule1935 = ReplacementRule(pattern1935, replacement1935)
pattern1936 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons31, cons1118, cons1119, cons85, cons1120)
def replacement1936(m, b, d, c, a, n, x, F):
rubi.append(1936)
return -Dist((m - n + S(1))/(b*n*log(F)), Int(F**(a + b*(c + d*x)**n)*(c + d*x)**(m - n), x), x) + Simp(F**(a + b*(c + d*x)**n)*(c + d*x)**(m - n + S(1))/(b*d*n*log(F)), x)
rule1936 = ReplacementRule(pattern1936, replacement1936)
pattern1937 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons21, cons4, cons1118, cons1119, cons356, cons531)
def replacement1937(m, b, d, c, a, n, x, F):
rubi.append(1937)
return -Dist((m - n + S(1))/(b*n*log(F)), Int(F**(a + b*(c + d*x)**n)*(c + d*x)**(m - n), x), x) + Simp(F**(a + b*(c + d*x)**n)*(c + d*x)**(m - n + S(1))/(b*d*n*log(F)), x)
rule1937 = ReplacementRule(pattern1937, replacement1937)
pattern1938 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons31, cons1118, cons1121, cons85, cons1122)
def replacement1938(m, b, d, c, a, n, x, F):
rubi.append(1938)
return -Dist(b*n*log(F)/(m + S(1)), Int(F**(a + b*(c + d*x)**n)*(c + d*x)**(m + n), x), x) + Simp(F**(a + b*(c + d*x)**n)*(c + d*x)**(m + S(1))/(d*(m + S(1))), x)
rule1938 = ReplacementRule(pattern1938, replacement1938)
pattern1939 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons21, cons4, cons1118, cons1121, cons356, cons535)
def replacement1939(m, b, d, c, a, n, x, F):
rubi.append(1939)
return -Dist(b*n*log(F)/(m + S(1)), Int(F**(a + b*(c + d*x)**n)*(c + d*x)**(m + n), x), x) + Simp(F**(a + b*(c + d*x)**n)*(c + d*x)**(m + S(1))/(d*(m + S(1))), x)
rule1939 = ReplacementRule(pattern1939, replacement1939)
def With1940(m, b, d, c, a, n, x, F):
k = Denominator(n)
rubi.append(1940)
return Dist(k/d, Subst(Int(F**(a + b*x**(k*n))*x**(k*(m + S(1)) + S(-1)), x), x, (c + d*x)**(S(1)/k)), x)
pattern1940 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('d', S(1)) + WC('c', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons93, cons1118, cons1119, cons23)
rule1940 = ReplacementRule(pattern1940, With1940)
pattern1941 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('f', S(1)) + WC('e', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons21, cons4, cons1116, cons1118, cons1123, cons18, cons1124)
def replacement1941(m, f, b, d, c, a, n, x, F, e):
rubi.append(1941)
return Dist((c + d*x)**(-m)*(e + f*x)**m, Int(F**(a + b*(c + d*x)**n)*(c + d*x)**m, x), x)
rule1941 = ReplacementRule(pattern1941, replacement1941)
pattern1942 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('f', S(1)) + WC('e', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons21, cons4, cons1116)
def replacement1942(m, f, b, d, c, a, n, x, F, e):
rubi.append(1942)
return -Simp(F**a*(-b*(c + d*x)**n*log(F))**(-(m + S(1))/n)*(e + f*x)**(m + S(1))*Gamma((m + S(1))/n, -b*(c + d*x)**n*log(F))/(f*n), x)
rule1942 = ReplacementRule(pattern1942, replacement1942)
pattern1943 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**S(2)*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('f', S(1)) + WC('e', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons176, cons367, cons166)
def replacement1943(m, f, b, d, c, a, x, F, e):
rubi.append(1943)
return Dist((-c*f + d*e)/d, Int(F**(a + b*(c + d*x)**S(2))*(e + f*x)**(m + S(-1)), x), x) - Dist(f**S(2)*(m + S(-1))/(S(2)*b*d**S(2)*log(F)), Int(F**(a + b*(c + d*x)**S(2))*(e + f*x)**(m + S(-2)), x), x) + Simp(F**(a + b*(c + d*x)**S(2))*f*(e + f*x)**(m + S(-1))/(S(2)*b*d**S(2)*log(F)), x)
rule1943 = ReplacementRule(pattern1943, replacement1943)
pattern1944 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**S(2)*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('f', S(1)) + WC('e', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons176, cons31, cons94)
def replacement1944(m, f, b, d, c, a, x, F, e):
rubi.append(1944)
return -Dist(S(2)*b*d**S(2)*log(F)/(f**S(2)*(m + S(1))), Int(F**(a + b*(c + d*x)**S(2))*(e + f*x)**(m + S(2)), x), x) + Dist(S(2)*b*d*(-c*f + d*e)*log(F)/(f**S(2)*(m + S(1))), Int(F**(a + b*(c + d*x)**S(2))*(e + f*x)**(m + S(1)), x), x) + Simp(F**(a + b*(c + d*x)**S(2))*(e + f*x)**(m + S(1))/(f*(m + S(1))), x)
rule1944 = ReplacementRule(pattern1944, replacement1944)
pattern1945 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('f', S(1)) + WC('e', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons176, cons85, cons744, cons31, cons94)
def replacement1945(m, f, b, d, c, a, n, x, F, e):
rubi.append(1945)
return -Dist(b*d*n*log(F)/(f*(m + S(1))), Int(F**(a + b*(c + d*x)**n)*(c + d*x)**(n + S(-1))*(e + f*x)**(m + S(1)), x), x) + Simp(F**(a + b*(c + d*x)**n)*(e + f*x)**(m + S(1))/(f*(m + S(1))), x)
rule1945 = ReplacementRule(pattern1945, replacement1945)
pattern1946 = Pattern(Integral(F_**(WC('a', S(0)) + WC('b', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))))/(x_*WC('f', S(1)) + WC('e', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons176)
def replacement1946(f, b, d, c, a, x, F, e):
rubi.append(1946)
return Dist(d/f, Int(F**(a + b/(c + d*x))/(c + d*x), x), x) - Dist((-c*f + d*e)/f, Int(F**(a + b/(c + d*x))/((c + d*x)*(e + f*x)), x), x)
rule1946 = ReplacementRule(pattern1946, replacement1946)
pattern1947 = Pattern(Integral(F_**(WC('a', S(0)) + WC('b', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))))*(x_*WC('f', S(1)) + WC('e', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons176, cons17, cons94)
def replacement1947(m, f, b, d, c, a, x, F, e):
rubi.append(1947)
return Dist(b*d*log(F)/(f*(m + S(1))), Int(F**(a + b/(c + d*x))*(e + f*x)**(m + S(1))/(c + d*x)**S(2), x), x) + Simp(F**(a + b/(c + d*x))*(e + f*x)**(m + S(1))/(f*(m + S(1))), x)
rule1947 = ReplacementRule(pattern1947, replacement1947)
pattern1948 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))/(x_*WC('f', S(1)) + WC('e', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons4, cons176)
def replacement1948(f, b, d, c, a, n, x, F, e):
rubi.append(1948)
return Int(F**(a + b*(c + d*x)**n)/(e + f*x), x)
rule1948 = ReplacementRule(pattern1948, replacement1948)
pattern1949 = Pattern(Integral(F_**v_*u_**WC('m', S(1)), x_), cons1099, cons21, cons68, cons840, cons1125)
def replacement1949(v, u, m, x, F):
rubi.append(1949)
return Int(F**ExpandToSum(v, x)*ExpandToSum(u, x)**m, x)
rule1949 = ReplacementRule(pattern1949, replacement1949)
pattern1950 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))**n_*WC('b', S(1)) + WC('a', S(0)))*u_, x_), cons1099, cons2, cons3, cons7, cons27, cons4, cons804)
def replacement1950(u, b, d, c, a, n, x, F):
rubi.append(1950)
return Int(ExpandLinearProduct(F**(a + b*(c + d*x)**n), u, c, d, x), x)
rule1950 = ReplacementRule(pattern1950, replacement1950)
pattern1951 = Pattern(Integral(F_**(v_*WC('b', S(1)) + WC('a', S(0)))*WC('u', S(1)), x_), cons1099, cons2, cons3, cons804, cons1126, cons1127)
def replacement1951(v, u, b, a, x, F):
rubi.append(1951)
return Int(F**(a + b*NormalizePowerOfLinear(v, x))*u, x)
rule1951 = ReplacementRule(pattern1951, replacement1951)
pattern1952 = Pattern(Integral(F_**(WC('a', S(0)) + WC('b', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))))/((x_*WC('f', S(1)) + WC('e', S(0)))*(x_*WC('h', S(1)) + WC('g', S(0)))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons1116)
def replacement1952(f, b, g, d, c, a, x, h, F, e):
rubi.append(1952)
return -Dist(d/(f*(-c*h + d*g)), Subst(Int(F**(a + b*d*x/(-c*h + d*g) - b*h/(-c*h + d*g))/x, x), x, (g + h*x)/(c + d*x)), x)
rule1952 = ReplacementRule(pattern1952, replacement1952)
pattern1953 = Pattern(Integral(F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('f', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))) + WC('e', S(0)))*(x_*WC('h', S(1)) + WC('g', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons21, cons25)
def replacement1953(m, f, b, g, d, c, a, x, h, F, e):
rubi.append(1953)
return Dist(F**(b*f/d + e), Int((g + h*x)**m, x), x)
rule1953 = ReplacementRule(pattern1953, replacement1953)
pattern1954 = Pattern(Integral(F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('f', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))) + WC('e', S(0)))*(x_*WC('h', S(1)) + WC('g', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons21, cons71, cons1128)
def replacement1954(m, f, b, g, d, c, a, x, h, F, e):
rubi.append(1954)
return Int(F**(-f*(-a*d + b*c)/(d*(c + d*x)) + (b*f + d*e)/d)*(g + h*x)**m, x)
rule1954 = ReplacementRule(pattern1954, replacement1954)
pattern1955 = Pattern(Integral(F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('f', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))) + WC('e', S(0)))/(x_*WC('h', S(1)) + WC('g', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons71, cons1129)
def replacement1955(f, b, g, d, c, a, x, h, F, e):
rubi.append(1955)
return Dist(d/h, Int(F**(e + f*(a + b*x)/(c + d*x))/(c + d*x), x), x) - Dist((-c*h + d*g)/h, Int(F**(e + f*(a + b*x)/(c + d*x))/((c + d*x)*(g + h*x)), x), x)
rule1955 = ReplacementRule(pattern1955, replacement1955)
pattern1956 = Pattern(Integral(F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('f', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))) + WC('e', S(0)))*(x_*WC('h', S(1)) + WC('g', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons71, cons1129, cons17, cons94)
def replacement1956(m, f, b, g, d, c, a, x, h, F, e):
rubi.append(1956)
return -Dist(f*(-a*d + b*c)*log(F)/(h*(m + S(1))), Int(F**(e + f*(a + b*x)/(c + d*x))*(g + h*x)**(m + S(1))/(c + d*x)**S(2), x), x) + Simp(F**(e + f*(a + b*x)/(c + d*x))*(g + h*x)**(m + S(1))/(h*(m + S(1))), x)
rule1956 = ReplacementRule(pattern1956, replacement1956)
pattern1957 = Pattern(Integral(F_**((x_*WC('b', S(1)) + WC('a', S(0)))*WC('f', S(1))/(x_*WC('d', S(1)) + WC('c', S(0))) + WC('e', S(0)))/((x_*WC('h', S(1)) + WC('g', S(0)))*(x_*WC('j', S(1)) + WC('i', S(0)))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons1128)
def replacement1957(j, f, b, g, i, d, c, a, x, h, F, e):
rubi.append(1957)
return -Dist(d/(h*(-c*j + d*i)), Subst(Int(F**(e - f*x*(-a*d + b*c)/(-c*j + d*i) + f*(-a*j + b*i)/(-c*j + d*i))/x, x), x, (i + j*x)/(c + d*x)), x)
rule1957 = ReplacementRule(pattern1957, replacement1957)
pattern1958 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons1130)
def replacement1958(b, c, a, x, F):
rubi.append(1958)
return Dist(F**(a - b**S(2)/(S(4)*c)), Int(F**((b + S(2)*c*x)**S(2)/(S(4)*c)), x), x)
rule1958 = ReplacementRule(pattern1958, replacement1958)
pattern1959 = Pattern(Integral(F_**v_, x_), cons1099, cons818, cons1131)
def replacement1959(v, x, F):
rubi.append(1959)
return Int(F**ExpandToSum(v, x), x)
rule1959 = ReplacementRule(pattern1959, replacement1959)
pattern1960 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('e', S(1)) + WC('d', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1132)
def replacement1960(b, d, c, a, x, F, e):
rubi.append(1960)
return Simp(F**(a + b*x + c*x**S(2))*e/(S(2)*c*log(F)), x)
rule1960 = ReplacementRule(pattern1960, replacement1960)
pattern1961 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('e', S(1)) + WC('d', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1132, cons31, cons166)
def replacement1961(m, b, d, c, a, x, F, e):
rubi.append(1961)
return -Dist(e**S(2)*(m + S(-1))/(S(2)*c*log(F)), Int(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(-2)), x), x) + Simp(F**(a + b*x + c*x**S(2))*e*(d + e*x)**(m + S(-1))/(S(2)*c*log(F)), x)
rule1961 = ReplacementRule(pattern1961, replacement1961)
pattern1962 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))/(x_*WC('e', S(1)) + WC('d', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1132)
def replacement1962(b, d, c, a, x, F, e):
rubi.append(1962)
return Simp(F**(a - b**S(2)/(S(4)*c))*ExpIntegralEi((b + S(2)*c*x)**S(2)*log(F)/(S(4)*c))/(S(2)*e), x)
rule1962 = ReplacementRule(pattern1962, replacement1962)
pattern1963 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('e', S(1)) + WC('d', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1132, cons31, cons94)
def replacement1963(m, b, d, c, a, x, F, e):
rubi.append(1963)
return -Dist(S(2)*c*log(F)/(e**S(2)*(m + S(1))), Int(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(2)), x), x) + Simp(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(1))/(e*(m + S(1))), x)
rule1963 = ReplacementRule(pattern1963, replacement1963)
pattern1964 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('e', S(1)) + WC('d', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1133)
def replacement1964(b, d, c, a, x, F, e):
rubi.append(1964)
return -Dist((b*e - S(2)*c*d)/(S(2)*c), Int(F**(a + b*x + c*x**S(2)), x), x) + Simp(F**(a + b*x + c*x**S(2))*e/(S(2)*c*log(F)), x)
rule1964 = ReplacementRule(pattern1964, replacement1964)
pattern1965 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('e', S(1)) + WC('d', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1133, cons31, cons166)
def replacement1965(m, b, d, c, a, x, F, e):
rubi.append(1965)
return -Dist((b*e - S(2)*c*d)/(S(2)*c), Int(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(-1)), x), x) - Dist(e**S(2)*(m + S(-1))/(S(2)*c*log(F)), Int(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(-2)), x), x) + Simp(F**(a + b*x + c*x**S(2))*e*(d + e*x)**(m + S(-1))/(S(2)*c*log(F)), x)
rule1965 = ReplacementRule(pattern1965, replacement1965)
pattern1966 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('e', S(1)) + WC('d', S(0)))**m_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1133, cons31, cons94)
def replacement1966(m, b, d, c, a, x, F, e):
rubi.append(1966)
return -Dist(S(2)*c*log(F)/(e**S(2)*(m + S(1))), Int(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(2)), x), x) - Dist((b*e - S(2)*c*d)*log(F)/(e**S(2)*(m + S(1))), Int(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(1)), x), x) + Simp(F**(a + b*x + c*x**S(2))*(d + e*x)**(m + S(1))/(e*(m + S(1))), x)
rule1966 = ReplacementRule(pattern1966, replacement1966)
pattern1967 = Pattern(Integral(F_**(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0)))*(x_*WC('e', S(1)) + WC('d', S(0)))**WC('m', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons21, cons1134)
def replacement1967(m, b, d, c, a, x, F, e):
rubi.append(1967)
return Int(F**(a + b*x + c*x**S(2))*(d + e*x)**m, x)
rule1967 = ReplacementRule(pattern1967, replacement1967)
pattern1968 = Pattern(Integral(F_**v_*u_**WC('m', S(1)), x_), cons1099, cons21, cons68, cons818, cons819)
def replacement1968(v, u, m, x, F):
rubi.append(1968)
return Int(F**ExpandToSum(v, x)*ExpandToSum(u, x)**m, x)
rule1968 = ReplacementRule(pattern1968, replacement1968)
def With1969(v, m, b, d, c, a, n, x, F, e):
u = IntHide(F**(e*(c + d*x))*(F**v*b + a)**n, x)
rubi.append(1969)
return -Dist(m, Int(u*x**(m + S(-1)), x), x) + Dist(x**m, u, x)
pattern1969 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*x_**WC('m', S(1))*(F_**v_*WC('b', S(1)) + WC('a', S(0)))**n_, x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons1135, cons31, cons168, cons196)
rule1969 = ReplacementRule(pattern1969, With1969)
def With1970(f, b, g, G, d, c, n, a, x, h, F, e):
if isinstance(x, (int, Integer, float, Float)):
return False
m = FullSimplify(g*h*log(G)/(d*e*log(F)))
if And(RationalQ(m), GreaterEqual(Abs(m), S(1))):
return True
return False
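    # With1970 doubles as the rule's CustomConstraint: pattern1970 fires only
    # when m = g*h*log(G)/(d*e*log(F)) simplifies to a rational with |m| >= 1,
    # and replacement1970 recomputes the same m before substituting. With1971
    # below mirrors this for the reciprocal ratio.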
pattern1970 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**WC('n', S(1)), x_), cons1099, cons1137, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons4, cons1136, CustomConstraint(With1970))
def replacement1970(f, b, g, G, d, c, n, a, x, h, F, e):
m = FullSimplify(g*h*log(G)/(d*e*log(F)))
rubi.append(1970)
return Dist(G**(-c*g*h/d + f*h)*Denominator(m)/(d*e*log(F)), Subst(Int(x**(Numerator(m) + S(-1))*(a + b*x**Denominator(m))**n, x), x, F**(e*(c + d*x)/Denominator(m))), x)
rule1970 = ReplacementRule(pattern1970, replacement1970)
def With1971(f, b, g, G, d, c, n, a, x, h, F, e):
if isinstance(x, (int, Integer, float, Float)):
return False
m = FullSimplify(d*e*log(F)/(g*h*log(G)))
if And(RationalQ(m), Greater(Abs(m), S(1))):
return True
return False
pattern1971 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**WC('n', S(1)), x_), cons1099, cons1137, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons4, cons1136, CustomConstraint(With1971))
def replacement1971(f, b, g, G, d, c, n, a, x, h, F, e):
m = FullSimplify(d*e*log(F)/(g*h*log(G)))
rubi.append(1971)
return Dist(Denominator(m)/(g*h*log(G)), Subst(Int(x**(Denominator(m) + S(-1))*(F**(c*e - d*e*f/g)*b*x**Numerator(m) + a)**n, x), x, G**(h*(f + g*x)/Denominator(m))), x)
rule1971 = ReplacementRule(pattern1971, replacement1971)
pattern1972 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**WC('n', S(1)), x_), cons1099, cons1137, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons1138, cons148)
def replacement1972(f, b, g, G, d, c, n, a, x, h, F, e):
rubi.append(1972)
return Int(G**(f*h)*G**(g*h*x)*(F**(c*e)*F**(d*e*x)*b + a)**n, x)
rule1972 = ReplacementRule(pattern1972, replacement1972)
pattern1973 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**n_, x_), cons1099, cons1137, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons1138, cons196)
def replacement1973(f, b, g, G, d, c, a, n, x, h, F, e):
rubi.append(1973)
return Simp(G**(h*(f + g*x))*a**n*Hypergeometric2F1(-n, g*h*log(G)/(d*e*log(F)), S(1) + g*h*log(G)/(d*e*log(F)), -F**(e*(c + d*x))*b/a)/(g*h*log(G)), x)
rule1973 = ReplacementRule(pattern1973, replacement1973)
pattern1974 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**n_, x_), cons1099, cons1137, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons4, cons1138, cons23)
def replacement1974(f, b, g, G, d, c, a, n, x, h, F, e):
rubi.append(1974)
return Simp(G**(h*(f + g*x))*(F**(e*(c + d*x))*b + a)**(n + S(1))*Hypergeometric2F1(S(1), n + S(1) + g*h*log(G)/(d*e*log(F)), S(1) + g*h*log(G)/(d*e*log(F)), -F**(e*(c + d*x))*b/a)/(a*g*h*log(G)), x)
rule1974 = ReplacementRule(pattern1974, replacement1974)
pattern1975 = Pattern(Integral(G_**(u_*WC('h', S(1)))*(F_**(v_*WC('e', S(1)))*WC('b', S(1)) + a_)**n_, x_), cons1099, cons1137, cons2, cons3, cons48, cons209, cons4, cons810, cons811)
def replacement1975(v, u, b, G, a, n, x, h, F, e):
rubi.append(1975)
return Int(G**(h*ExpandToSum(u, x))*(F**(e*ExpandToSum(v, x))*b + a)**n, x)
rule1975 = ReplacementRule(pattern1975, replacement1975)
def With1976(t, f, b, g, r, G, d, c, n, a, H, x, h, s, e, F):
if isinstance(x, (int, Integer, float, Float)):
return False
m = FullSimplify((g*h*log(G) + s*t*log(H))/(d*e*log(F)))
if RationalQ(m):
return True
return False
pattern1976 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*H_**((x_*WC('s', S(1)) + WC('r', S(0)))*WC('t', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**WC('n', S(1)), x_), cons1099, cons1137, cons1140, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons52, cons800, cons1141, cons4, cons1139, CustomConstraint(With1976))
def replacement1976(t, f, b, g, r, G, d, c, n, a, H, x, h, s, e, F):
m = FullSimplify((g*h*log(G) + s*t*log(H))/(d*e*log(F)))
rubi.append(1976)
return Dist(G**(-c*g*h/d + f*h)*H**(-c*s*t/d + r*t)*Denominator(m)/(d*e*log(F)), Subst(Int(x**(Numerator(m) + S(-1))*(a + b*x**Denominator(m))**n, x), x, F**(e*(c + d*x)/Denominator(m))), x)
rule1976 = ReplacementRule(pattern1976, replacement1976)
pattern1977 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*H_**((x_*WC('s', S(1)) + WC('r', S(0)))*WC('t', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**WC('n', S(1)), x_), cons1099, cons1137, cons1140, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons52, cons800, cons1141, cons1142, cons85)
def replacement1977(t, f, b, g, r, G, d, c, n, a, H, x, h, s, e, F):
rubi.append(1977)
return Dist(G**(h*(-c*g/d + f)), Int(H**(t*(r + s*x))*(b + F**(-e*(c + d*x))*a)**n, x), x)
rule1977 = ReplacementRule(pattern1977, replacement1977)
pattern1978 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*H_**((x_*WC('s', S(1)) + WC('r', S(0)))*WC('t', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**WC('n', S(1)), x_), cons1099, cons1137, cons1140, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons52, cons800, cons1141, cons1143, cons148)
def replacement1978(t, f, b, g, r, G, d, c, n, a, H, x, h, s, e, F):
rubi.append(1978)
return Int(G**(f*h)*G**(g*h*x)*H**(r*t)*H**(s*t*x)*(F**(c*e)*F**(d*e*x)*b + a)**n, x)
rule1978 = ReplacementRule(pattern1978, replacement1978)
pattern1979 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*H_**((x_*WC('s', S(1)) + WC('r', S(0)))*WC('t', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**n_, x_), cons1099, cons1137, cons1140, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons52, cons800, cons1141, cons1143, cons196)
def replacement1979(t, f, b, g, r, G, d, c, a, n, H, x, h, s, e, F):
rubi.append(1979)
return Simp(G**(h*(f + g*x))*H**(t*(r + s*x))*a**n*Hypergeometric2F1(-n, (g*h*log(G) + s*t*log(H))/(d*e*log(F)), S(1) + (g*h*log(G) + s*t*log(H))/(d*e*log(F)), -F**(e*(c + d*x))*b/a)/(g*h*log(G) + s*t*log(H)), x)
rule1979 = ReplacementRule(pattern1979, replacement1979)
pattern1980 = Pattern(Integral(G_**((x_*WC('g', S(1)) + WC('f', S(0)))*WC('h', S(1)))*H_**((x_*WC('s', S(1)) + WC('r', S(0)))*WC('t', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + a_)**n_, x_), cons1099, cons1137, cons1140, cons2, cons3, cons7, cons27, cons48, cons125, cons208, cons209, cons52, cons800, cons1141, cons4, cons1143, cons23)
def replacement1980(t, f, b, g, r, G, d, c, a, n, H, x, h, s, e, F):
rubi.append(1980)
return Simp(G**(h*(f + g*x))*H**(t*(r + s*x))*((F**(e*(c + d*x))*b + a)/a)**(-n)*(F**(e*(c + d*x))*b + a)**n*Hypergeometric2F1(-n, (g*h*log(G) + s*t*log(H))/(d*e*log(F)), S(1) + (g*h*log(G) + s*t*log(H))/(d*e*log(F)), -F**(e*(c + d*x))*b/a)/(g*h*log(G) + s*t*log(H)), x)
rule1980 = ReplacementRule(pattern1980, replacement1980)
pattern1981 = Pattern(Integral(G_**(u_*WC('h', S(1)))*H_**(w_*WC('t', S(1)))*(F_**(v_*WC('e', S(1)))*WC('b', S(1)) + a_)**n_, x_), cons1099, cons1137, cons1140, cons2, cons3, cons48, cons209, cons1141, cons4, cons812, cons813)
def replacement1981(v, w, u, t, b, G, a, n, H, x, h, F, e):
rubi.append(1981)
return Int(G**(h*ExpandToSum(u, x))*H**(t*ExpandToSum(w, x))*(F**(e*ExpandToSum(v, x))*b + a)**n, x)
rule1981 = ReplacementRule(pattern1981, replacement1981)
pattern1982 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + x_**WC('n', S(1))*WC('a', S(1)))**WC('p', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons4, cons5, cons54)
def replacement1982(p, b, d, c, a, n, x, F, e):
rubi.append(1982)
return -Dist(a*n/(b*d*e*log(F)), Int(x**(n + S(-1))*(F**(e*(c + d*x))*b + a*x**n)**p, x), x) + Simp((F**(e*(c + d*x))*b + a*x**n)**(p + S(1))/(b*d*e*(p + S(1))*log(F)), x)
rule1982 = ReplacementRule(pattern1982, replacement1982)
pattern1983 = Pattern(Integral(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*x_**WC('m', S(1))*(F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1)))*WC('b', S(1)) + x_**WC('n', S(1))*WC('a', S(1)))**WC('p', S(1)), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons21, cons4, cons5, cons54)
def replacement1983(p, m, b, d, c, a, n, x, F, e):
rubi.append(1983)
return -Dist(a*n/(b*d*e*log(F)), Int(x**(m + n + S(-1))*(F**(e*(c + d*x))*b + a*x**n)**p, x), x) - Dist(m/(b*d*e*(p + S(1))*log(F)), Int(x**(m + S(-1))*(F**(e*(c + d*x))*b + a*x**n)**(p + S(1)), x), x) + Simp(x**m*(F**(e*(c + d*x))*b + a*x**n)**(p + S(1))/(b*d*e*(p + S(1))*log(F)), x)
rule1983 = ReplacementRule(pattern1983, replacement1983)
def With1984(v, u, m, f, b, g, c, a, x, F):
q = Rt(-S(4)*a*c + b**S(2), S(2))
rubi.append(1984)
return Dist(S(2)*c/q, Int((f + g*x)**m/(S(2)*F**u*c + b - q), x), x) - Dist(S(2)*c/q, Int((f + g*x)**m/(S(2)*F**u*c + b + q), x), x)
pattern1984 = Pattern(Integral((x_*WC('g', S(1)) + WC('f', S(0)))**WC('m', S(1))/(F_**u_*WC('b', S(1)) + F_**v_*WC('c', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons125, cons208, cons1144, cons68, cons226, cons62)
rule1984 = ReplacementRule(pattern1984, With1984)
def With1985(v, u, m, f, b, g, c, a, x, F):
q = Rt(-S(4)*a*c + b**S(2), S(2))
rubi.append(1985)
return Dist(S(2)*c/q, Int(F**u*(f + g*x)**m/(S(2)*F**u*c + b - q), x), x) - Dist(S(2)*c/q, Int(F**u*(f + g*x)**m/(S(2)*F**u*c + b + q), x), x)
pattern1985 = Pattern(Integral(F_**u_*(x_*WC('g', S(1)) + WC('f', S(0)))**WC('m', S(1))/(F_**u_*WC('b', S(1)) + F_**v_*WC('c', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons125, cons208, cons1144, cons68, cons226, cons62)
rule1985 = ReplacementRule(pattern1985, With1985)
def With1986(v, u, m, f, b, g, i, c, a, x, h, F):
q = Rt(-S(4)*a*c + b**S(2), S(2))
rubi.append(1986)
return -Dist(-i + (-b*i + S(2)*c*h)/q, Int((f + g*x)**m/(S(2)*F**u*c + b + q), x), x) + Dist(i + (-b*i + S(2)*c*h)/q, Int((f + g*x)**m/(S(2)*F**u*c + b - q), x), x)
pattern1986 = Pattern(Integral((F_**u_*WC('i', S(1)) + h_)*(x_*WC('g', S(1)) + WC('f', S(0)))**WC('m', S(1))/(F_**u_*WC('b', S(1)) + F_**v_*WC('c', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons125, cons208, cons209, cons224, cons1144, cons68, cons226, cons62)
rule1986 = ReplacementRule(pattern1986, With1986)
def With1987(v, m, b, d, c, a, x, F):
u = IntHide(S(1)/(F**v*b + F**(c + d*x)*a), x)
rubi.append(1987)
return -Dist(m, Int(u*x**(m + S(-1)), x), x) + Simp(u*x**m, x)
pattern1987 = Pattern(Integral(x_**WC('m', S(1))/(F_**v_*WC('b', S(1)) + F_**(x_*WC('d', S(1)) + WC('c', S(0)))*WC('a', S(1))), x_), cons1099, cons2, cons3, cons7, cons27, cons1145, cons31, cons168)
rule1987 = ReplacementRule(pattern1987, With1987)
pattern1988 = Pattern(Integral(u_/(F_**v_*WC('b', S(1)) + F_**w_*WC('c', S(1)) + a_), x_), cons1099, cons2, cons3, cons7, cons552, cons1146, cons1147, cons1148)
def replacement1988(v, w, u, b, c, a, x, F):
rubi.append(1988)
return Int(F**v*u/(F**(S(2)*v)*b + F**v*a + c), x)
rule1988 = ReplacementRule(pattern1988, replacement1988)
pattern1989 = Pattern(Integral(F_**((x_*WC('e', S(1)) + WC('d', S(0)))**WC('n', S(1))*WC('g', S(1)))/(x_**S(2)*WC('c', S(1)) + x_*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons208, cons4, cons1149)
def replacement1989(g, b, d, a, n, c, x, F, e):
rubi.append(1989)
return Int(ExpandIntegrand(F**(g*(d + e*x)**n), S(1)/(a + b*x + c*x**S(2)), x), x)
rule1989 = ReplacementRule(pattern1989, replacement1989)
pattern1990 = Pattern(Integral(F_**((x_*WC('e', S(1)) + WC('d', S(0)))**WC('n', S(1))*WC('g', S(1)))/(a_ + x_**S(2)*WC('c', S(1))), x_), cons1099, cons2, cons7, cons27, cons48, cons208, cons4, cons1150)
def replacement1990(g, d, c, n, a, x, F, e):
rubi.append(1990)
return Int(ExpandIntegrand(F**(g*(d + e*x)**n), S(1)/(a + c*x**S(2)), x), x)
rule1990 = ReplacementRule(pattern1990, replacement1990)
pattern1991 = Pattern(Integral(F_**((x_*WC('e', S(1)) + WC('d', S(0)))**WC('n', S(1))*WC('g', S(1)))*u_**WC('m', S(1))/(c_*x_**S(2) + x_*WC('b', S(1)) + WC('a', S(0))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons208, cons4, cons804, cons17)
def replacement1991(u, m, g, b, d, a, n, c, x, F, e):
rubi.append(1991)
return Int(ExpandIntegrand(F**(g*(d + e*x)**n), u**m/(a + b*x + c*x**S(2)), x), x)
rule1991 = ReplacementRule(pattern1991, replacement1991)
pattern1992 = Pattern(Integral(F_**((x_*WC('e', S(1)) + WC('d', S(0)))**WC('n', S(1))*WC('g', S(1)))*u_**WC('m', S(1))/(a_ + c_*x_**S(2)), x_), cons1099, cons2, cons7, cons27, cons48, cons208, cons4, cons804, cons17)
def replacement1992(u, m, g, d, a, n, c, x, F, e):
rubi.append(1992)
return Int(ExpandIntegrand(F**(g*(d + e*x)**n), u**m/(a + c*x**S(2)), x), x)
rule1992 = ReplacementRule(pattern1992, replacement1992)
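    # Rule 1993 gives the closed form of Int(F**((a + b*x**4)/x**2), x) in terms
    # of Erf, by completing the exponent to
    # (sqrt(-b*log(F))*x**2 +- sqrt(-a*log(F)))**2/x**2.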
pattern1993 = Pattern(Integral(F_**((x_**S(4)*WC('b', S(1)) + WC('a', S(0)))/x_**S(2)), x_), cons1099, cons2, cons3, cons1151)
def replacement1993(x, a, b, F):
rubi.append(1993)
return -Simp(sqrt(Pi)*Erf((-x**S(2)*sqrt(-b*log(F)) + sqrt(-a*log(F)))/x)*exp(-S(2)*sqrt(-a*log(F))*sqrt(-b*log(F)))/(S(4)*sqrt(-b*log(F))), x) + Simp(sqrt(Pi)*Erf((x**S(2)*sqrt(-b*log(F)) + sqrt(-a*log(F)))/x)*exp(S(2)*sqrt(-a*log(F))*sqrt(-b*log(F)))/(S(4)*sqrt(-b*log(F))), x)
rule1993 = ReplacementRule(pattern1993, replacement1993)
pattern1994 = Pattern(Integral(x_**WC('m', S(1))*(x_**WC('m', S(1)) + exp(x_))**n_, x_), cons93, cons168, cons463, cons1152)
def replacement1994(x, m, n):
rubi.append(1994)
return Dist(m, Int(x**(m + S(-1))*(x**m + exp(x))**n, x), x) + Int((x**m + exp(x))**(n + S(1)), x) - Simp((x**m + exp(x))**(n + S(1))/(n + S(1)), x)
rule1994 = ReplacementRule(pattern1994, replacement1994)
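    # Rules 1995-1996 integrate log(a + b*(F**(e*(c + d*x)))**n): rule 1995 by
    # the substitution t = (F**(e*(c + d*x)))**n, rule 1996 by parts.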
pattern1995 = Pattern(Integral(log(a_ + (F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1))))**WC('n', S(1))*WC('b', S(1))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons4, cons43)
def replacement1995(b, d, c, n, a, x, F, e):
rubi.append(1995)
return Dist(S(1)/(d*e*n*log(F)), Subst(Int(log(a + b*x)/x, x), x, (F**(e*(c + d*x)))**n), x)
rule1995 = ReplacementRule(pattern1995, replacement1995)
pattern1996 = Pattern(Integral(log(a_ + (F_**((x_*WC('d', S(1)) + WC('c', S(0)))*WC('e', S(1))))**WC('n', S(1))*WC('b', S(1))), x_), cons1099, cons2, cons3, cons7, cons27, cons48, cons4, cons448)
def replacement1996(b, d, c, n, a, x, F, e):
rubi.append(1996)
return -Dist(b*d*e*n*log(F), Int(x*(F**(e*(c + d*x)))**n/(a + b*(F**(e*(c + d*x)))**n), x), x) + Simp(x*log(a + b*(F**(e*(c + d*x)))**n), x)
rule1996 = ReplacementRule(pattern1996, replacement1996)
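    # Rule 1997: for non-integer n, (a*F**v)**n and a**n*F**(n*v) differ only by
    # a piecewise-constant factor, which Dist pulls out of the integral.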
pattern1997 = Pattern(Integral((F_**v_*WC('a', S(1)))**n_*WC('u', S(1)), x_), cons1099, cons2, cons4, cons23)
def replacement1997(v, u, a, n, x, F):
rubi.append(1997)
return Dist(F**(-n*v)*(F**v*a)**n, Int(F**(n*v)*u, x), x)
rule1997 = ReplacementRule(pattern1997, replacement1997)
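    # Rule 1998 handles integrands that are functions of a single exponential:
    # substituting t = FunctionOfExponential(u, x) reduces the integral to
    # Int(f(t)/t, t) up to the constant factor v/D(v, x).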
def With1998(x, u):
v = FunctionOfExponential(u, x)
rubi.append(1998)
return Dist(v/D(v, x), Subst(Int(FunctionOfExponentialFunction(u, x)/x, x), x, v), x)
pattern1998 = Pattern(Integral(u_, x_), cons1153)
rule1998 = ReplacementRule(pattern1998, With1998)
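    # Rules 1999-2002 rewrite a*F**v + b*G**w as F**v times a binomial in a
    # single exponential: directly for integer n (1999-2000), and otherwise up
    # to a piecewise-constant ratio (2001-2002).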
pattern1999 = Pattern(Integral((F_**v_*WC('a', S(1)) + F_**w_*WC('b', S(1)))**n_*WC('u', S(1)), x_), cons1099, cons2, cons3, cons4, cons196, cons1154)
def replacement1999(v, w, u, b, a, n, x, F):
rubi.append(1999)
return Int(F**(n*v)*u*(F**ExpandToSum(-v + w, x)*b + a)**n, x)
rule1999 = ReplacementRule(pattern1999, replacement1999)
pattern2000 = Pattern(Integral((F_**v_*WC('a', S(1)) + G_**w_*WC('b', S(1)))**n_*WC('u', S(1)), x_), cons1099, cons1137, cons2, cons3, cons4, cons196, cons1154)
def replacement2000(v, w, u, b, G, a, n, x, F):
rubi.append(2000)
return Int(F**(n*v)*u*(a + b*exp(ExpandToSum(-v*log(F) + w*log(G), x)))**n, x)
rule2000 = ReplacementRule(pattern2000, replacement2000)
pattern2001 = Pattern(Integral((F_**v_*WC('a', S(1)) + F_**w_*WC('b', S(1)))**n_*WC('u', S(1)), x_), cons1099, cons2, cons3, cons4, cons23, cons1154)
def replacement2001(v, w, u, b, a, n, x, F):
rubi.append(2001)
return Dist(F**(-n*v)*(F**v*a + F**w*b)**n*(F**ExpandToSum(-v + w, x)*b + a)**(-n), Int(F**(n*v)*u*(F**ExpandToSum(-v + w, x)*b + a)**n, x), x)
rule2001 = ReplacementRule(pattern2001, replacement2001)
pattern2002 = Pattern(Integral((F_**v_*WC('a', S(1)) + G_**w_*WC('b', S(1)))**n_*WC('u', S(1)), x_), cons1099, cons1137, cons2, cons3, cons4, cons23, cons1154)
def replacement2002(v, w, u, b, G, a, n, x, F):
rubi.append(2002)
return Dist(F**(-n*v)*(a + b*exp(ExpandToSum(-v*log(F) + w*log(G), x)))**(-n)*(F**v*a + G**w*b)**n, Int(F**(n*v)*u*(a + b*exp(ExpandToSum(-v*log(F) + w*log(G), x)))**n, x), x)
rule2002 = ReplacementRule(pattern2002, replacement2002)
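    # Rule 2003 merges a product of exponentials with different bases into one
    # factor exp(v*log(F) + w*log(G)) before normalizing the integrand.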
pattern2003 = Pattern(Integral(F_**v_*G_**w_*WC('u', S(1)), x_), cons1099, cons1137, cons1155)
def replacement2003(v, w, u, G, x, F):
rubi.append(2003)
return Int(u*NormalizeIntegrand(exp(v*log(F) + w*log(G)), x), x)
rule2003 = ReplacementRule(pattern2003, replacement2003)
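    # Rules 2004-2005 are derivative-divides tests: the With blocks construct a
    # candidate z and succeed only if F**u*z really differentiates back to the
    # integrand, in which case F**u*z (times the matching coefficient ratio) is
    # the antiderivative.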
def With2004(v, w, u, y, x, F):
if isinstance(x, (int, Integer, float, Float)):
return False
z = v*y/(D(u, x)*log(F))
if ZeroQ(-w*y + D(z, x)):
return True
return False
pattern2004 = Pattern(Integral(F_**u_*(v_ + w_)*WC('y', S(1)), x_), cons1099, cons1099, CustomConstraint(With2004))
def replacement2004(v, w, u, y, x, F):
z = v*y/(D(u, x)*log(F))
rubi.append(2004)
return Simp(F**u*z, x)
rule2004 = ReplacementRule(pattern2004, replacement2004)
def With2005(v, w, u, n, x, F):
if isinstance(x, (int, Integer, float, Float)):
return False
z = v*D(u, x)*log(F) + (n + S(1))*D(v, x)
if And(Equal(Exponent(w, x), Exponent(z, x)), ZeroQ(w*Coefficient(z, x, Exponent(z, x)) - z*Coefficient(w, x, Exponent(w, x)))):
return True
return False
pattern2005 = Pattern(Integral(F_**u_*v_**WC('n', S(1))*w_, x_), cons1099, cons4, cons804, cons1017, cons1109, CustomConstraint(With2005))
def replacement2005(v, w, u, n, x, F):
z = v*D(u, x)*log(F) + (n + S(1))*D(v, x)
rubi.append(2005)
return Simp(F**u*v**(n + S(1))*Coefficient(w, x, Exponent(w, x))/Coefficient(z, x, Exponent(z, x)), x)
rule2005 = ReplacementRule(pattern2005, replacement2005)
return [rule1901, rule1902, rule1903, rule1904, rule1905, rule1906, rule1907, rule1908, rule1909, rule1910, rule1911, rule1912, rule1913, rule1914, rule1915, rule1916, rule1917, rule1918, rule1919, rule1920, rule1921, rule1922, rule1923, rule1924, rule1925, rule1926, rule1927, rule1928, rule1929, rule1930, rule1931, rule1932, rule1933, rule1934, rule1935, rule1936, rule1937, rule1938, rule1939, rule1940, rule1941, rule1942, rule1943, rule1944, rule1945, rule1946, rule1947, rule1948, rule1949, rule1950, rule1951, rule1952, rule1953, rule1954, rule1955, rule1956, rule1957, rule1958, rule1959, rule1960, rule1961, rule1962, rule1963, rule1964, rule1965, rule1966, rule1967, rule1968, rule1969, rule1970, rule1971, rule1972, rule1973, rule1974, rule1975, rule1976, rule1977, rule1978, rule1979, rule1980, rule1981, rule1982, rule1983, rule1984, rule1985, rule1986, rule1987, rule1988, rule1989, rule1990, rule1991, rule1992, rule1993, rule1994, rule1995, rule1996, rule1997, rule1998, rule1999, rule2000, rule2001, rule2002, rule2003, rule2004, rule2005, ]
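# A minimal usage sketch (illustrative, not part of the generated rules): the
# ReplacementRules above are consumed by Rubi's matcher, which tries each
# pattern against the integrand. The entry-point module name has varied across
# versions (rubi vs. rubimain), so treat the import below as an assumption.
#
#   from sympy.integrals.rubi.rubi import rubi_integrate
#   from sympy.abc import x
#   from sympy import exp
#   rubi_integrate(x*exp(x), x)   # dispatched to one of the exponential rules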
# File: /tools/bin/pythonSrc/pexpect-4.2/tests/test_command_list_split.py (DalavanCloud/hawq)
#!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import pexpect
import unittest
from . import PexpectTestCase
class SplitCommandLineTestCase(PexpectTestCase.PexpectTestCase):
def testSplitSizes(self):
assert len(pexpect.split_command_line(r'')) == 0
assert len(pexpect.split_command_line(r'one')) == 1
        assert len(pexpect.split_command_line(r'one two')) == 2
        assert len(pexpect.split_command_line(r'one  two')) == 2   # runs of spaces
        assert len(pexpect.split_command_line(r'one   two')) == 2  # act as one separator
assert len(pexpect.split_command_line(r'one\ one')) == 1
assert len(pexpect.split_command_line('\'one one\'')) == 1
assert len(pexpect.split_command_line(r'one\"one')) == 1
assert len(pexpect.split_command_line(r'This\' is a\'\ test')) == 3
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(SplitCommandLineTestCase,'test')
# File: /demo/testPyQt/test3.py (dqyi11/SVNBackup)
'''
Created on Apr 22, 2014
@author: walter
'''
import sys
from PyQt4 import QtGui
class Example(QtGui.QMainWindow):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(QtGui.qApp.quit)
self.statusBar()
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAction)
self.setGeometry(300, 300, 300, 200)
self.setWindowTitle('Menubar')
self.show()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
# File: /themoviedb.py (dehy/themoviepredictor)
import requests
import os
from datetime import datetime
from movie import Movie
class TheMovieDB:
ENDPOINT = "api.themoviedb.org/3"
DEFAULT_QUERY_PARAMS = {
"language": "fr-FR"
}
def __init__(self):
api_key = os.environ["TMDB_API_KEY"]
self.query_params = {
"api_key": api_key
}
self.query_params.update(TheMovieDB.DEFAULT_QUERY_PARAMS)
def get_movie(self, imdb_id):
response = requests.get(
f"https://{TheMovieDB.ENDPOINT}/movie/{imdb_id}", params=self.query_params)
if (response.status_code != 200):
print("Error in request")
return None
dict_response = response.json()
return self.movie_from_json(dict_response)
def movie_from_json(self, dict_movie):
title = dict_movie['title']
original_title = dict_movie['original_title']
duration = dict_movie['runtime']
release_date = datetime.strptime(
dict_movie['release_date'], '%Y-%m-%d')
rating = None
synopsis = dict_movie['overview']
production_budget = dict_movie['budget']
movie = Movie(title, original_title, duration, release_date, rating)
movie.synopsis = synopsis
movie.production_budget = production_budget
return movie
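# Illustrative usage (assumes TMDB_API_KEY is set in the environment; the id
# 550 and the printed field are example choices, not part of the module):
#
#   db = TheMovieDB()
#   movie = db.get_movie(550)
#   if movie is not None:
#       print(movie.synopsis)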
# File: /AddItem.py (liuhpleon1/MenuApp)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from DB_setup import base, Restaurant, Menu
engine = create_engine('sqlite:///restaurant_menu.db')
base.metadata.bind = engine
DBsession = sessionmaker(bind = engine)
session = DBsession()
'''
r1 = Restaurant(name="china king")
session.add(r1)
session.commit()
m1 = Menu(name='orange chicken',course='main',description='very good',
price = '$4',restaurant=r1)
session.add(m1)
session.commit()
first = session.query(Menu).first()
print first.name + first.course + first.description + first.price + first.restaurant.name
'''
#Menu for UrbanBurger
restaurant1 = Restaurant(name = "Urban Burger")
session.add(restaurant1)
session.commit()
menuItem1 = Menu(name = "French Fries", description = "with garlic and parmesan", price = "$2.99", course = "Appetizer", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Chicken Burger", description = "Juicy grilled chicken patty with tomato mayo and lettuce", price = "$5.50", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = Menu(name = "Chocolate Cake", description = "fresh baked and served with ice cream", price = "$3.99", course = "Dessert", restaurant = restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = Menu(name = "Sirloin Burger", description = "Made with grade A beef", price = "$7.99", course = "Entree", restaurant = restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = Menu(name = "Root Beer", description = "16oz of refreshing goodness", price = "$1.99", course = "Beverage", restaurant = restaurant1)
session.add(menuItem5)
session.commit()
menuItem6 = Menu(name = "Iced Tea", description = "with Lemon", price = "$.99", course = "Beverage", restaurant = restaurant1)
session.add(menuItem6)
session.commit()
menuItem7 = Menu(name = "Grilled Cheese Sandwich", description = "On texas toast with American Cheese", price = "$3.49", course = "Entree", restaurant = restaurant1)
session.add(menuItem7)
session.commit()
menuItem8 = Menu(name = "Veggie Burger", description = "Made with freshest of ingredients and home grown spices", price = "$5.99", course = "Entree", restaurant = restaurant1)
session.add(menuItem8)
session.commit()
#Menu for Super Stir Fry
restaurant2 = Restaurant(name = "Super Stir Fry")
session.add(restaurant2)
session.commit()
menuItem1 = Menu(name = "Chicken Stir Fry", description = "With your choice of noodles vegetables and sauces", price = "$7.99", course = "Entree", restaurant = restaurant2)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Peking Duck", description = " A famous duck dish from Beijing[1] that has been prepared since the imperial era. The meat is prized for its thin, crisp skin, with authentic versions of the dish serving mostly the skin and little meat, sliced in front of the diners by the cook", price = "$25", course = "Entree", restaurant = restaurant2)
session.add(menuItem2)
session.commit()
menuItem3 = Menu(name = "Spicy Tuna Roll", description = "Seared rare ahi, avocado, edamame, cucumber with wasabi soy sauce ", price = "15", course = "Entree", restaurant = restaurant2)
session.add(menuItem3)
session.commit()
menuItem4 = Menu(name = "Nepali Momo ", description = "Steamed dumplings made with vegetables, spices and meat. ", price = "12", course = "Entree", restaurant = restaurant2)
session.add(menuItem4)
session.commit()
menuItem5 = Menu(name = "Beef Noodle Soup", description = "A Chinese noodle soup made of stewed or red braised beef, beef broth, vegetables and Chinese noodles.", price = "14", course = "Entree", restaurant = restaurant2)
session.add(menuItem5)
session.commit()
menuItem6 = Menu(name = "Ramen", description = "a Japanese noodle soup dish. It consists of Chinese-style wheat noodles served in a meat- or (occasionally) fish-based broth, often flavored with soy sauce or miso, and uses toppings such as sliced pork, dried seaweed, kamaboko, and green onions.", price = "12", course = "Entree", restaurant = restaurant2)
session.add(menuItem6)
session.commit()
#Menu for Panda Garden
restaurant1 = Restaurant(name = "Panda Garden")
session.add(restaurant1)
session.commit()
menuItem1 = Menu(name = "Pho", description = "a Vietnamese noodle soup consisting of broth, linguine-shaped rice noodles called banh pho, a few herbs, and meat.", price = "$8.99", course = "Entree", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Chinese Dumplings", description = "a common Chinese dumpling which generally consists of minced meat and finely chopped vegetables wrapped into a piece of dough skin. The skin can be either thin and elastic or thicker.", price = "$6.99", course = "Appetizer", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = Menu(name = "Gyoza", description = "The most prominent differences between Japanese-style gyoza and Chinese-style jiaozi are the rich garlic flavor, which is less noticeable in the Chinese version, the light seasoning of Japanese gyoza with salt and soy sauce, and the fact that gyoza wrappers are much thinner", price = "$9.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = Menu(name = "Stinky Tofu", description = "Taiwanese dish, deep fried fermented tofu served with pickled cabbage.", price = "$6.99", course = "Entree", restaurant = restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = Menu(name = "Veggie Burger", description = "Juicy grilled veggie patty with tomato mayo and lettuce", price = "$9.50", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
#Menu for Thyme for that
restaurant1 = Restaurant(name = "Thyme for That Vegetarian Cuisine ")
session.add(restaurant1)
session.commit()
menuItem1 = Menu(name = "Tres Leches Cake", description = "Rich, luscious sponge cake soaked in sweet milk and topped with vanilla bean whipped cream and strawberries.", price = "$2.99", course = "Dessert", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Mushroom risotto", description = "Portabello mushrooms in a creamy risotto", price = "$5.99", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = Menu(name = "Honey Boba Shaved Snow", description = "Milk snow layered with honey boba, jasmine tea jelly, grass jelly, caramel, cream, and freshly made mochi", price = "$4.50", course = "Dessert", restaurant = restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = Menu(name = "Cauliflower Manchurian", description = "Golden fried cauliflower florets in a midly spiced soya,garlic sauce cooked with fresh cilantro, celery, chilies,ginger & green onions", price = "$6.95", course = "Appetizer", restaurant = restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = Menu(name = "Aloo Gobi Burrito", description = "Vegan goodness. Burrito filled with rice, garbanzo beans, curry sauce, potatoes (aloo), fried cauliflower (gobi) and chutney. Nom Nom", price = "$7.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem5)
session.commit()
menuItem2 = Menu(name = "Veggie Burger", description = "Juicy grilled veggie patty with tomato mayo and lettuce", price = "$6.80", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
#Menu for Tony's Bistro
restaurant1 = Restaurant(name = "Tony\'s Bistro ")
session.add(restaurant1)
session.commit()
menuItem1 = Menu(name = "Shellfish Tower", description = "Lobster, shrimp, sea snails, crawfish, stacked into a delicious tower", price = "$13.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Chicken and Rice", description = "Chicken... and rice", price = "$4.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = Menu(name = "Mom's Spaghetti", description = "Spaghetti with some incredible tomato sauce made by mom", price = "$6.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = Menu(name = "Choc Full O\' Mint (Smitten\'s Fresh Mint Chip ice cream)", description = "Milk, cream, salt, ..., Liquid nitrogen magic", price = "$3.95", course = "Dessert", restaurant = restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = Menu(name = "Tonkatsu Ramen", description = "Noodles in a delicious pork-based broth with a soft-boiled egg", price = "$7.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem5)
session.commit()
#Menu for Andala's
restaurant1 = Restaurant(name = "Andala\'s")
session.add(restaurant1)
session.commit()
menuItem1 = Menu(name = "Lamb Curry", description = "Slow cook that thang in a pool of tomatoes, onions and alllll those tasty Indian spices. Mmmm.", price = "$9.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Chicken Marsala", description = "Chicken cooked in Marsala wine sauce with mushrooms", price = "$7.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = Menu(name = "Potstickers", description = "Delicious chicken and veggies encapsulated in fried dough.", price = "$6.50", course = "Appetizer", restaurant = restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = Menu(name = "Nigiri Sampler", description = "Maguro, Sake, Hamachi, Unagi, Uni, TORO!", price = "$6.75", course = "Appetizer", restaurant = restaurant1)
session.add(menuItem4)
session.commit()
menuItem2 = Menu(name = "Veggie Burger", description = "Juicy grilled veggie patty with tomato mayo and lettuce", price = "$7.00", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
#Menu for Auntie Ann's
restaurant1 = Restaurant(name = "Auntie Ann\'s Diner' ")
session.add(restaurant1)
session.commit()
menuItem9 = Menu(name = "Chicken Fried Steak", description = "Fresh battered sirloin steak fried and smothered with cream gravy", price = "$8.99", course = "Entree", restaurant = restaurant1)
session.add(menuItem9)
session.commit()
menuItem1 = Menu(name = "Boysenberry Sorbet", description = "An unsettlingly huge amount of ripe berries turned into frozen (and seedless) awesomeness", price = "$2.99", course = "Dessert", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Broiled salmon", description = "Salmon fillet marinated with fresh herbs and broiled hot & fast", price = "$10.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = Menu(name = "Morels on toast (seasonal)", description = "Wild morel mushrooms fried in butter, served on herbed toast slices", price = "$7.50", course = "Appetizer", restaurant = restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = Menu(name = "Tandoori Chicken", description = "Chicken marinated in yoghurt and seasoned with a spicy mix(chilli, tamarind among others) and slow cooked in a cylindrical clay or metal oven which gets its heat from burning charcoal.", price = "$8.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem4)
session.commit()
menuItem2 = Menu(name = "Veggie Burger", description = "Juicy grilled veggie patty with tomato mayo and lettuce", price = "$9.50", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
menuItem10 = Menu(name = "Spinach Ice Cream", description = "vanilla ice cream made with organic spinach leaves", price = "$1.99", course = "Dessert", restaurant = restaurant1)
session.add(menuItem10)
session.commit()
#Menu for Cocina Y Amor
restaurant1 = Restaurant(name = "Cocina Y Amor ")
session.add(restaurant1)
session.commit()
menuItem1 = Menu(name = "Super Burrito Al Pastor", description = "Marinated Pork, Rice, Beans, Avocado, Cilantro, Salsa, Tortilla", price = "$5.95", course = "Entree", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = Menu(name = "Cachapa", description = "Golden brown, corn-based Venezuelan pancake; usually stuffed with queso telita or queso de mano, and possibly lechon. ", price = "$7.99", course = "Entree", restaurant = restaurant1)
session.add(menuItem2)
session.commit()
restaurant1 = Restaurant(name = "State Bird Provisions")
session.add(restaurant1)
session.commit()
menuItem1 = Menu(name = "Chantrelle Toast", description = "Crispy Toast with Sesame Seeds slathered with buttery chantrelle mushrooms", price = "$5.95", course = "Appetizer", restaurant = restaurant1)
session.add(menuItem1)
session.commit()  # fixed: the call parentheses were missing, so this commit never ran
menuItem1 = Menu(name = "Guanciale Chawanmushi", description = "Japanese egg custard served hot with spicey Italian Pork Jowl (guanciale)", price = "$6.95", course = "Dessert", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
menuItem1 = Menu(name = "Lemon Curd Ice Cream Sandwich", description = "Lemon Curd Ice Cream Sandwich on a chocolate macaron with cardamom meringue and cashews", price = "$4.25", course = "Dessert", restaurant = restaurant1)
session.add(menuItem1)
session.commit()
print "added menu items!"
# File: /amazon_beta/run.py (idxyz/amazon_beta)
from scrapy import cmdline
cmdline.execute("scrapy crawl amazon".split())
# File: /web-app/src/qa/management/commands/evaluate_command.py (UrosOgrizovic/FitBot)
from django.core.management.base import BaseCommand
from src.qa.apps import QaConfig
from difflib import SequenceMatcher
ROBERTA_BASE_MODEL = "deepset/roberta-base-squad2"
MINILM_UNCASED_MODEL = "deepset/minilm-uncased-squad2"
class Command(BaseCommand):
def read_evaluation_data(self, path="evaluation_data.txt"):
questions, answers, contexts = [], [], []
with open(path, "r") as f:
content = f.readlines()[1:] # skip header
for line in content:
question, answer, context = line.split("|")
questions.append(question)
answers.append(answer)
contexts.append({"text": context})
return questions, answers, contexts
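    # Expected evaluation_data.txt layout: a header row, then one
    # pipe-separated "question|answer|context" record per line, e.g.
    #   How much water should I drink?|About 2 liters|Hydration matters ...
    # (the sample record above is illustrative, not from the project)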
def get_similarities(self, questions, answers):
similarities = []
for i, question in enumerate(questions):
print(question)
prediction = QaConfig.pipe.run(query=question, top_k_retriever=3, top_k_reader=3)
best_answer_obj = prediction['answers'][0]
best_answer_txt = best_answer_obj['answer']
best_answer_context = best_answer_obj['context']
print('predicted ans', best_answer_txt, 'actual ans', answers[i])
similarity = SequenceMatcher(None, best_answer_txt, answers[i]).ratio()
            if len(best_answer_txt.split(" ")) > 2 and (best_answer_txt in answers[i] or answers[i] in best_answer_txt):
                # boost similarity when one answer contains the other (near-exact match)
                similarity = min(1.0, similarity + 0.3)
similarities.append(
{'similarity': similarity, 'txt': best_answer_txt,
'context': best_answer_context})
return similarities
def handle(self, *args, **kwargs):
questions, answers, contexts = self.read_evaluation_data()
similarities = self.get_similarities(questions, answers)
print('similarities', [sim['similarity'] for sim in similarities])
# File: /xai/brain/wordbase/nouns/_untruth.py (cash2one/xai)
# class header
class _UNTRUTH():
def __init__(self,):
self.name = "UNTRUTH"
self.definitions = [u'a statement that is not true: ', u'the fact that something is not true: ']
self.parents = []
        self.children = []  # fixed typo: was "childen"
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
# File: /application/models.py (paritakumbhani/Django-Project)
from django.db import models
class Cheese(models.Model):
CheeseId = models.CharField(max_length=10)
CheeseNameEn = models.CharField(max_length=100)
ManufacturerNameEn = models.CharField(max_length=300)
ManufacturerProvCode = models.CharField(max_length=2)
ManufacturingTypeEn = models.CharField(max_length=50)
WebSiteEn = models.CharField(max_length=300)
FatContentPercent = models.CharField(max_length=50)
MoisturePercent = models.CharField(max_length=50)
ParticularitiesEn = models.CharField(max_length=300)
FlavourEn = models.CharField(max_length=200)
CharacteristicsEn = models.CharField(max_length=500)
RipeningEn = models.CharField(max_length=50)
Organic = models.CharField(max_length=2)
CategoryTypeEn = models.CharField(max_length=200)
MilkTypeEn = models.CharField(max_length=50)
MilkTreatmentTypeEn = models.CharField(max_length=50)
RindTypeEn = models.CharField(max_length=50)
LastUpdateDate = models.CharField(max_length=10)
# File: /character.py (dluiscosta/pythongame)
import pygame as pg
import numpy as np
import aux
class Character:
@classmethod
def generate_characters(cls, cel_size, mov_handler):
# Builds characters
cls.chars = []
chars_to_use = range(1, 9)
char_offset = (-(0.1*cel_size)/2,-(0.75*cel_size))
for char_n in chars_to_use:
char_sprites = {}
for dir in [pg.K_UP, pg.K_RIGHT, pg.K_DOWN, pg.K_LEFT]:
dir_str = {pg.K_UP:'up', pg.K_RIGHT:'right', pg.K_DOWN:'down', pg.K_LEFT:'left'}[dir]
path = 'img/characters/' + str(char_n) + '/'
idle_sprite = pg.image.load(path + dir_str + '_' + aux.to_n_digits(2, 2) + '.png')
idle_sprite = pg.transform.scale(idle_sprite,
(int(1.1*cel_size), int(1.5*cel_size)))
moving_sprites = [pg.image.load(path + dir_str + '_' + aux.to_n_digits(i, 2) + '.png')
for i in [1, 3]]
moving_sprites = [pg.transform.scale(sprite,
(int(1.1*cel_size), int(1.5*cel_size)))
for sprite in moving_sprites]
char_sprites[dir] = {'idle':idle_sprite, 'moving':moving_sprites}
cls.chars.append(Character(char_sprites, char_offset, mov_handler))
@classmethod
def get_free_characters(cls, n):
free_characters = [char for char in cls.chars if char.board is None]
if n > len(free_characters):
raise Exception('Not enough available characters.')
else:
return np.random.choice(free_characters, n, replace=False)
@classmethod
def get_used_characters(cls):
return [char for char in cls.chars if char.board is not None]
def __init__(self, sprites, draw_offset = (0, 0), mov_handler = None):
self.position = None
self.board = None
self.sprites = sprites
self.draw_offset = draw_offset
self.facing = pg.K_DOWN
self.been_moving = 0 #frames
self.mov_handler = mov_handler
# Check if the position in the given direction can be occupied by the character
def can_move_dir(self, direction):
        if self.board is None:
raise Exception("Can't move if not on a board.")
if direction == pg.K_UP:
return self.board.can_occupy(self.position[0], self.position[1]-1)
elif direction == pg.K_DOWN:
return self.board.can_occupy(self.position[0], self.position[1]+1)
elif direction == pg.K_LEFT:
return self.board.can_occupy(self.position[0]-1, self.position[1])
elif direction == pg.K_RIGHT:
return self.board.can_occupy(self.position[0]+1, self.position[1])
else:
raise Exception("Invalid direction.")
# Updates the character position, if possible
def move_char(self, direction):
if self.can_move_dir(direction):
new_position = list(self.position)
if direction == pg.K_UP: new_position[1] -= 1
elif direction == pg.K_DOWN: new_position[1] += 1
elif direction == pg.K_LEFT: new_position[0] -= 1
elif direction == pg.K_RIGHT: new_position[0] += 1
self.position = tuple(new_position)
def at_objective(self):
return self.position in self.board.objectives_pos
def am_moving(self):
return self.mov_handler.is_moving() and self.can_move_dir(self.mov_handler.get_direction())
def draw(self, screen, start_x, start_y, cel_size):
dx, dy = (0, 0) #delta from current movement
img = self.sprites[self.facing]['idle']
if self.am_moving():
self.facing = self.mov_handler.get_direction()
# Calculates distance already walked between two cells
distance_covered = int(((cel_size/self.mov_handler.get_move_steps()) *
self.mov_handler.get_steps_taken()))
dx, dy = {pg.K_UP:(0,-distance_covered),
pg.K_DOWN:(0,+distance_covered),
pg.K_LEFT:(-distance_covered,0),
pg.K_RIGHT:(+distance_covered,0)}[self.facing]
# Gets moving sprite
msf = self.mov_handler.get_moving_sprite_frames()
moving_frames = self.sprites[self.facing]['moving']
frame_idx = int(self.been_moving/msf)%len(moving_frames)
img = moving_frames[frame_idx]
self.been_moving += 1
else:
# Restarts moving frames counter
self.been_moving = 0
offset_x, offset_y = self.draw_offset
c_x = start_x + dx + offset_x
c_y = start_y + dy + offset_y
screen.blit(img, (c_x, c_y)) #draws the character
# File: /cortes/migrations/0004_remove_service_currency.py (fershopls/drive-sync)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cortes', '0003_auto_20160731_2331'),
]
operations = [
migrations.RemoveField(
model_name='service',
name='currency',
),
]
# File: /SMTP/Simple texto.py (machiscorstia/python)
import smtplib
Usuario = input("Your Hotmail address: ")
Contra = input("Your password: ")
Asunto = input("Subject: ")
Mensaje = input("Message: ")
Destinatario = input("Recipient's Hotmail address: ")
Mensaje = f"Subject: {Asunto}\n\n{Mensaje}"
Server = smtplib.SMTP('smtp-mail.outlook.com', 587)
Server.starttls()
Server.login(Usuario, Contra)
Server.sendmail(Usuario, Destinatario, Mensaje)
Server.quit()
# File: /ex054 - grupo maioridade.py (M-de-Mateus/Python)
from datetime import date
atual = date.today().year
totmaior = 0
totmenor = 0
for pess in range(1, 8):
    nasc = int(input('In what year was person {} born? '.format(pess)))
idade = atual - nasc
if idade >= 21:
totmaior += 1
else:
totmenor += 1
print('In total there were {} people of legal age!'.format(totmaior))
print('And there were also {} people under age.'.format(totmenor))
# File: /raspberry-pi/main.py (RocketDepartment/hackathon-tbd)
# NeoPixel library strandtest example
# Author: Tony DiCola (tony@tonydicola.com)
#
# Direct port of the Arduino NeoPixel library strandtest example. Showcases
# various animations on a strip of NeoPixels.
import time
import json
from neopixel import *
# LED strip configuration:
LED_COUNT = 237 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
class LEDMatrix(object):
def __init__(self, json):
self.id = None
self.row0 = json["row0"]
self.row1 = json["row1"]
self.row2 = json["row2"]
self.row3 = json["row3"]
self.row4 = json["row4"]
self.row5 = json["row5"]
self.row6 = json["row6"]
self.row7 = json["row7"]
self.row8 = json["row8"]
self.row9 = json["row9"]
self.row10 = json["row10"]
self.row11 = json["row11"]
self.row12 = json["row12"]
self.row13 = json["row13"]
self.row14 = json["row14"]
self.row15 = json["row15"]
self.row16 = json["row16"]
self.row17 = json["row17"]
self.row18 = json["row18"]
self.row19 = json["row19"]
self.row20 = json["row20"]
self.row21 = json["row21"]
self.row22 = json["row22"]
self.row23 = json["row23"]
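    # The strip snakes through the matrix, so row direction alternates:
    # even rows run left-to-right, odd rows right-to-left (handled in loadRow).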
def loadRow(self, strip, row_index, start_pixel, row):
# for each row in the display
# even index
if row_index % 2 == 0:
for i in range( len(row) ):
r = row[i][0]
g = row[i][1]
b = row[i][2]
strip.setPixelColor( start_pixel+i, Color(r, g, b))
if not row_index % 2 == 0:
for i in range( len(row) ):
r = row[i][0]
g = row[i][1]
b = row[i][2]
strip.setPixelColor( start_pixel+len(row)-i-1, Color(r, g, b))
# Main program logic follows:
if __name__ == '__main__':
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
# Intialize the library (must be called once before other functions).
strip.begin()
# intialize values
app_url = "http://172.16.100.11:3000/foo"
current_id = None
print 'Press Ctrl-C to quit.'
# main program loop
    import urllib2  # hoisted: importing inside the loop re-ran on every iteration
    while True:
        data = urllib2.urlopen(app_url).read()
        data = json.loads(data)
new_id = data["id"]
print new_id
if not new_id == current_id:
current_id = new_id
print "New Display"
led_matrix = LEDMatrix(data)
led_matrix.loadRow(strip, 0, 0, data["row0"])
led_matrix.loadRow(strip, 1, 9, data["row1"])
led_matrix.loadRow(strip, 2, 18, data["row2"])
led_matrix.loadRow(strip, 3, 27, data["row3"])
led_matrix.loadRow(strip, 4, 37, data["row4"])
led_matrix.loadRow(strip, 5, 46, data["row5"])
led_matrix.loadRow(strip, 6, 55, data["row6"])
led_matrix.loadRow(strip, 7, 64, data["row7"])
led_matrix.loadRow(strip, 8, 72, data["row8"])
led_matrix.loadRow(strip, 9, 81, data["row9"])
led_matrix.loadRow(strip, 10, 90, data["row10"])
led_matrix.loadRow(strip, 11, 99, data["row11"])
led_matrix.loadRow(strip, 12, 109, data["row12"])
led_matrix.loadRow(strip, 13, 120, data["row13"])
led_matrix.loadRow(strip, 14, 132, data["row14"])
led_matrix.loadRow(strip, 15, 144, data["row15"])
led_matrix.loadRow(strip, 16, 157, data["row16"])
led_matrix.loadRow(strip, 17, 170, data["row17"])
led_matrix.loadRow(strip, 18, 183, data["row18"])
led_matrix.loadRow(strip, 19, 197, data["row19"])
led_matrix.loadRow(strip, 20, 208, data["row20"])
led_matrix.loadRow(strip, 21, 217, data["row21"])
led_matrix.loadRow(strip, 22, 225, data["row22"])
led_matrix.loadRow(strip, 23, 231, data["row23"])
strip.setBrightness(255)
strip.show()
# sleep before querying the server again
time.sleep(2000.0/1000.0)
# File: /Chapter_9/formsub.py (iluxonchik/webscraping-with-python-book)
import requests
params = {'firstname':'hello', 'lastname':'there'}
r = requests.post("http://pythonscraping.com/files/processing.php", data=params)
print(r.text)
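# Passing a dict through `files=` makes requests build a multipart/form-data
# body, which is what the upload form expects.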
files = {'uploadFile': open('1.png', 'rb')}
r = requests.post("http://pythonscraping.com/files/processing2.php", files=files)
print(r.text)
# File: /examples/Learning Python The Hard Way/ex11_AskingQuestions.py (AkiraKane/Python)
'''
Created on 2013-8-14
@author: Kelly Chan
Python Version: V3.3
Book: Learn Python The Hard Way
Ex11: Asking Questions
'''
print("How old are you?")
age = input()
print("How tall are you?")
height = input()
print("How much do you weigh?")
weight = input()
print("So, you're %r old, %r tall and %r heavy." % (age, height, weight))
# File: /Project_Euler/python/p027.py (AdamAtkins-Public/current)
import os
import time
"""
Euler discovered the remarkable quadratic formula:
n^2 + n + 41
It turns out that the formula will produce 40 primes for the consecutive integer values 0 <= n <= 39.
However, when
n = 40, 40^2 + 40 + 41 = 40(40 + 1) + 41
is divisible by 41,
and certainly when
n = 41, 41^2 + 41 + 41
is clearly divisible by 41.
The incredible formula n^2 - 79n + 1601
was discovered, which produces 80 primes for the consecutive values 0 <= n <= 79.
The product of the coefficients, −79 and 1601, is −126479.
Considering quadratics of the form:
n^2 + an + b, where |a| < 1000 and |b| <= 1000
where |n| is the modulus/absolute value of n
e.g. |11| = 11 and |-4| = 4
Find the product of the coefficients, a and b,
for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n=0.
"""
def is_prime(n):
if n == 2:
return True
elif n < 2:
return False
if n & 1 == 0:
return False
    for i in range(3, int(n**0.5) + 1, 2):  # include the square root itself (e.g. n = 9)
if n % i == 0:
return False
return True
def quadratic(n,a,b):
return n**2 + a*n + b
def brute_force():
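    # primes memoizes primality checks across all (a, b) pairs; max_count
    # holds the best (last prime index, a, b) seen so far.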
primes = {}
max_count = (int(-1),int(),int())
count = 0
for a in range(-999,1000):
for b in range(-1000,1001):
n = 0
while(True):
number = quadratic(n,a,b)
if number not in primes:
if is_prime(number): primes[number] = 1
if number in primes:
if n > max_count[0]: max_count = (n,a,b)
n += 1
else:
break
return max_count
if __name__ == '__main__':
stime = time.time()
max_count = brute_force()
print("Solution: {0}\nRuntime: {1}".format(max_count[1]*max_count[2],time.time()-stime))
# File: /crypten/mpc/provider/ttp_provider.py (tnpe/CrypTen)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from functools import reduce
import crypten
import crypten.communicator as comm
import torch
import torch.distributed as dist
from crypten.common.rng import generate_kbit_random_tensor, generate_random_ring_element
from crypten.common.util import count_wraps, torch_stack
from crypten.mpc.primitives import ArithmeticSharedTensor, BinarySharedTensor
TTP_FUNCTIONS = ["additive", "square", "binary", "wraps", "B2A"]
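# In the trusted-third-party model, party 0 requests the completing share of
# each precomputed tuple (e.g. the product term of a Beaver triple) from the
# TTP server, while the remaining parties derive their shares locally from
# PRSS generators seeded by that server.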
class TrustedThirdParty:
NAME = "TTP"
@staticmethod
def generate_additive_triple(size0, size1, op, device=None, *args, **kwargs):
"""Generate multiplicative triples of given sizes"""
generator = TTPClient.get().get_generator(device=device)
a = generate_random_ring_element(size0, generator=generator, device=device)
b = generate_random_ring_element(size1, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request c from TTP
c = TTPClient.get().ttp_request(
"additive", device, size0, size1, op, *args, **kwargs
)
else:
# TODO: Compute size without executing computation
c_size = getattr(torch, op)(a, b, *args, **kwargs).size()
c = generate_random_ring_element(c_size, generator=generator, device=device)
a = ArithmeticSharedTensor.from_shares(a, precision=0)
b = ArithmeticSharedTensor.from_shares(b, precision=0)
c = ArithmeticSharedTensor.from_shares(c, precision=0)
return a, b, c
@staticmethod
def square(size, device=None):
"""Generate square double of given size"""
generator = TTPClient.get().get_generator(device=device)
r = generate_random_ring_element(size, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request r2 from TTP
r2 = TTPClient.get().ttp_request("square", device, size)
else:
r2 = generate_random_ring_element(size, generator=generator, device=device)
r = ArithmeticSharedTensor.from_shares(r, precision=0)
r2 = ArithmeticSharedTensor.from_shares(r2, precision=0)
return r, r2
@staticmethod
def generate_binary_triple(size0, size1, device=None):
"""Generate binary triples of given size"""
generator = TTPClient.get().get_generator(device=device)
a = generate_kbit_random_tensor(size0, generator=generator, device=device)
b = generate_kbit_random_tensor(size1, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request c from TTP
c = TTPClient.get().ttp_request("binary", device, size0, size1)
else:
size2 = torch.broadcast_tensors(a, b)[0].size()
c = generate_kbit_random_tensor(size2, generator=generator, device=device)
# Stack to vectorize scatter function
a = BinarySharedTensor.from_shares(a)
b = BinarySharedTensor.from_shares(b)
c = BinarySharedTensor.from_shares(c)
return a, b, c
@staticmethod
def wrap_rng(size, device=None):
"""Generate random shared tensor of given size and sharing of its wraps"""
generator = TTPClient.get().get_generator(device=device)
r = generate_random_ring_element(size, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request theta_r from TTP
theta_r = TTPClient.get().ttp_request("wraps", device, size)
else:
theta_r = generate_random_ring_element(
size, generator=generator, device=device
)
r = ArithmeticSharedTensor.from_shares(r, precision=0)
theta_r = ArithmeticSharedTensor.from_shares(theta_r, precision=0)
return r, theta_r
@staticmethod
def B2A_rng(size, device=None):
"""Generate random bit tensor as arithmetic and binary shared tensors"""
generator = TTPClient.get().get_generator(device=device)
# generate random bit
rB = generate_kbit_random_tensor(
size, bitlength=1, generator=generator, device=device
)
if comm.get().get_rank() == 0:
# Request rA from TTP
rA = TTPClient.get().ttp_request("B2A", device, size)
else:
rA = generate_random_ring_element(size, generator=generator, device=device)
rA = ArithmeticSharedTensor.from_shares(rA, precision=0)
rB = BinarySharedTensor.from_shares(rB)
return rA, rB
@staticmethod
def rand(*sizes, encoder=None, device=None):
"""Generate random ArithmeticSharedTensor uniform on [0, 1]"""
generator = TTPClient.get().get_generator(device=device)
if isinstance(sizes, torch.Size):
sizes = tuple(sizes)
if isinstance(sizes[0], torch.Size):
sizes = tuple(sizes[0])
if comm.get().get_rank() == 0:
# Request samples from TTP
samples = TTPClient.get().ttp_request(
"rand", device, *sizes, encoder=encoder
)
else:
samples = generate_random_ring_element(
sizes, generator=generator, device=device
)
return ArithmeticSharedTensor.from_shares(samples)
@staticmethod
def _init():
TTPClient._init()
@staticmethod
def uninit():
TTPClient.uninit()
class TTPClient:
__instance = None
class __TTPClient:
"""Singleton class"""
def __init__(self):
# Initialize connection
self.ttp_group = comm.get().ttp_group
self.comm_group = comm.get().ttp_comm_group
self._setup_generators()
logging.info(f"TTPClient {comm.get().get_rank()} initialized")
def _setup_generators(self):
seed = torch.empty(size=(), dtype=torch.long)
dist.irecv(
tensor=seed, src=comm.get().get_ttp_rank(), group=self.ttp_group
).wait()
dist.barrier(group=self.ttp_group)
self.generator = torch.Generator(device="cpu")
self.generator_cuda = torch.Generator(device="cuda")
self.generator.manual_seed(seed.item())
self.generator_cuda.manual_seed(seed.item())
def get_generator(self, device=None):
if device is None:
device = "cpu"
device = torch.device(device)
if device.type == "cuda":
return self.generator_cuda
else:
return self.generator
def ttp_request(self, func_name, device, *args, **kwargs):
assert (
comm.get().get_rank() == 0
), "Only party 0 communicates with the TTPServer"
if device is not None:
device = str(device)
message = {
"function": func_name,
"device": device,
"args": args,
"kwargs": kwargs,
}
ttp_rank = comm.get().get_ttp_rank()
comm.get().send_obj(message, ttp_rank, self.ttp_group)
size = comm.get().recv_obj(ttp_rank, self.ttp_group)
result = torch.empty(size, dtype=torch.long, device=device)
comm.get().broadcast(result, ttp_rank, self.comm_group)
return result
@staticmethod
def _init():
"""Initializes a Trusted Third Party client that sends requests"""
if TTPClient.__instance is None:
TTPClient.__instance = TTPClient.__TTPClient()
@staticmethod
def uninit():
"""Uninitializes a Trusted Third Party client"""
del TTPClient.__instance
TTPClient.__instance = None
@staticmethod
def get():
"""Returns the instance of the TTPClient"""
if TTPClient.__instance is None:
raise RuntimeError("TTPClient is not initialized")
return TTPClient.__instance
class TTPServer:
TERMINATE = -1
def __init__(self):
"""Initializes a Trusted Third Party server that receives requests"""
# Initialize connection
crypten.init()
self.ttp_group = comm.get().ttp_group
self.comm_group = comm.get().ttp_comm_group
self.device = "cpu"
self._setup_generators()
ttp_rank = comm.get().get_ttp_rank()
logging.info("TTPServer Initialized")
try:
while True:
# Wait for next request from client
message = comm.get().recv_obj(0, self.ttp_group)
logging.info("Message received: %s" % message)
if message == "terminate":
logging.info("TTPServer shutting down.")
return
function = message["function"]
device = message["device"]
args = message["args"]
kwargs = message["kwargs"]
self.device = device
result = getattr(self, function)(*args, **kwargs)
comm.get().send_obj(result.size(), 0, self.ttp_group)
comm.get().broadcast(result, ttp_rank, self.comm_group)
except RuntimeError as err:
logging.info("Encountered Runtime error. TTPServer shutting down:")
logging.info(f"{err}")
def _setup_generators(self):
"""Create random generator to send to a party"""
ws = comm.get().get_world_size()
seeds = [torch.randint(-(2 ** 63), 2 ** 63 - 1, size=()) for _ in range(ws)]
reqs = [
dist.isend(tensor=seeds[i], dst=i, group=self.ttp_group) for i in range(ws)
]
self.generators = [torch.Generator(device="cpu") for _ in range(ws)]
self.generators_cuda = [torch.Generator(device="cuda") for _ in range(ws)]
for i in range(ws):
self.generators[i].manual_seed(seeds[i].item())
self.generators_cuda[i].manual_seed(seeds[i].item())
reqs[i].wait()
dist.barrier(group=self.ttp_group)
def _get_generators(self, device=None):
if device is None:
device = "cpu"
device = torch.device(device)
if device.type == "cuda":
return self.generators_cuda
else:
return self.generators
def _get_additive_PRSS(self, size, remove_rank=False):
"""
Generates a plaintext value from a set of random additive secret shares
generated by each party
"""
gens = self._get_generators(device=self.device)
if remove_rank:
gens = gens[1:]
result = torch_stack(
[
generate_random_ring_element(size, generator=g, device=g.device)
for g in gens
]
)
return result.sum(0)
def _get_binary_PRSS(self, size, bitlength=None, remove_rank=None):
"""
Generates a plaintext value from a set of random binary secret shares
generated by each party
"""
gens = self._get_generators(device=self.device)
if remove_rank:
gens = gens[1:]
result = [
generate_kbit_random_tensor(
size, bitlength=bitlength, generator=g, device=g.device
)
for g in gens
]
return reduce(lambda a, b: a ^ b, result)
def additive(self, size0, size1, op, *args, **kwargs):
# Add all shares of `a` and `b` to get plaintext `a` and `b`
a = self._get_additive_PRSS(size0)
b = self._get_additive_PRSS(size1)
c = getattr(torch, op)(a, b, *args, **kwargs)
# Subtract all other shares of `c` from plaintext value of `c` to get `c0`
c0 = c - self._get_additive_PRSS(c.size(), remove_rank=True)
return c0
def square(self, size):
# Add all shares of `r` to get plaintext `r`
r = self._get_additive_PRSS(size)
r2 = r.mul(r)
return r2 - self._get_additive_PRSS(size, remove_rank=True)
def binary(self, size0, size1):
# xor all shares of `a` and `b` to get plaintext `a` and `b`
a = self._get_binary_PRSS(size0)
b = self._get_binary_PRSS(size1)
c = a & b
# xor all other shares of `c` from plaintext value of `c` to get `c0`
c0 = c ^ self._get_binary_PRSS(c.size(), remove_rank=True)
return c0
def wraps(self, size):
r = [generate_random_ring_element(size, generator=g) for g in self.generators]
theta_r = count_wraps(r)
return theta_r - self._get_additive_PRSS(size, remove_rank=True)
def B2A(self, size):
rB = self._get_binary_PRSS(size, bitlength=1)
# Subtract all other shares of `rA` from plaintext value of `rA`
rA = rB - self._get_additive_PRSS(size, remove_rank=True)
return rA
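    # A minimal sketch of the PRSS invariant the helpers above rely on: the
    # per-party shares sum (additive) or xor (binary) to the plaintext, so the
    # TTP only needs to send rank 0 a residual share. Values are illustrative.
    #   shares = [torch.randint(-100, 100, (3,)) for _ in range(3)]
    #   plain = sum(shares)                  # additive reconstruction
    #   c0 = plain - sum(shares[1:])         # the `remove_rank=True` residual
    #   assert torch.equal(c0 + sum(shares[1:]), plain)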
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
5f9a7f944e9cdd3ded69ec2885ddbcec4a0039dc
|
a0f6c66bb22b20066329de30971f6880b5098765
|
/ex015.py
|
baad93f68c949b579b0869266889efcd51a28475
|
[] |
no_license
|
EderOBarreto/exercicios-python
|
dcfed885e2723a57974cb479f7c6f3d0b63ca62f
|
88bbd6903c8ca9bf705c2e1d6adeae4bb8799f40
|
refs/heads/master
| 2020-03-18T20:35:27.891987
| 2018-06-25T15:31:31
| 2018-06-25T15:31:31
| 135,226,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
cores = {'limpa': '\033[m',
'azul': '\033[34m',
'amarelo': '\033[33m',
'verde_bold': '\033[1;32m',
'magenta': '\033[35m',
'preto_e_branco': '\033[7;30m',
'sublinhado': '\033[4m'}
qtdKm = float(input('Distance traveled in kilometers: '))
qtdDias = int(input('Number of days rented: '))
print('The total to pay is {}R$ {:.2f}{}'.format(cores['verde_bold'], qtdDias * 60 + qtdKm * 0.15, cores['limpa']))
|
[
"ederoliveirabarreto@gmail.com"
] |
ederoliveirabarreto@gmail.com
|
e7df1ffbd062f8f616fff956e0482311a709c86a
|
e4d4149a717d08979953983fa78fea46df63d13d
|
/Week6/Day5/DailyChallenge.py
|
68788690cf6a298238eff2f8a5648c48d64c3f7a
|
[] |
no_license
|
fayblash/DI_Bootcamp
|
72fd75497a2484d19c779775c49e4306e602d10f
|
a4e8f62e338df5d5671fd088afa575ea2e290837
|
refs/heads/main
| 2023-05-05T20:55:31.513558
| 2021-05-27T06:48:40
| 2021-05-27T06:48:40
| 354,818,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
import sqlite3 as sl
from time import time
import requests
connection = sl.connect("countries.db")
cursor = connection.cursor()
start = time()
# Fetch the full country list once instead of re-downloading it on every loop pass
response = requests.get("https://restcountries.eu/rest/v2/all")
countries = response.json()
for i in range(10, 21):
    country = countries[i]
    print(country['name'])
    # Parameterized query avoids SQL injection and quoting problems
    cursor.execute(
        "INSERT INTO countries(name,capital,flag,subregion,population) VALUES (?, ?, ?, ?, ?)",
        (country['name'], country['capital'], country['flag'],
         country['subregion'], country['population']),
    )
connection.commit()
connection.close()
end = time()
print(end - start)
|
[
"fayblash@gmail.com"
] |
fayblash@gmail.com
|
f55192badac567d785a5f2be8c8ef2460664d21e
|
d645a5629160fb510c62e7b8f00061feafbe112c
|
/flight_data.py
|
f8b5ea399faf69da5b363ef3e163a735bc6ce0c1
|
[] |
no_license
|
Phern17/flight-deals
|
fe80a6234b57864f1a6713f0c5c0c8c832ecc303
|
96fa4416423a96ca23286fa842a81fa87c8473dd
|
refs/heads/master
| 2023-03-31T10:01:41.692720
| 2021-04-08T12:45:55
| 2021-04-08T12:45:55
| 354,899,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
class FlightData:
# This class is responsible for structuring the flight data.
def __init__(self, price, city_from, fly_from, city_to, fly_to,
leave_date, return_date, stop_overs=0, via_city=""):
self.price = price
self.departure_city_name = city_from
self.departure_airport = fly_from
self.arrival_city_name = city_to
self.arrival_airport = fly_to
self.outbound_date = leave_date
self.inbound_date = return_date
self.stop_overs = stop_overs
self.via_city = via_city
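# A minimal usage sketch; the values below are made up for illustration.
if __name__ == "__main__":
    flight = FlightData(price=120, city_from="Paris", fly_from="CDG",
                        city_to="Berlin", fly_to="BER",
                        leave_date="2021-05-01", return_date="2021-05-08")
    print(f"{flight.departure_city_name} -> {flight.arrival_city_name}: {flight.price}")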
|
[
"mphern17@gmail.com"
] |
mphern17@gmail.com
|
97698fdcf4861c65a25ec9893aa57e5b52a06063
|
b6b2be9866fd16699ad5c30a21bbcb70755f1e57
|
/Experiments/_Legacy/Chicago/PartitionByDocThenClusterUsingLsa.py
|
6e174934f81ca58b1b992fbc3c3f4391ac23815f
|
[] |
no_license
|
simonhughes22/PythonNlpResearch
|
24a482c7036c568b063ec099176b393d45a0a86b
|
2bc2914ce93fcef6dbd26f8097eec20b7d0e476d
|
refs/heads/master
| 2022-12-08T17:39:18.332177
| 2019-10-26T12:48:33
| 2019-10-26T12:48:33
| 16,458,105
| 17
| 7
| null | 2022-12-07T23:38:17
| 2014-02-02T16:36:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
import Clusterer
import ClustersToFile
import SentenceData
import ListHelper
import Lsa
import MatrixHelper
import TfIdf
import WordTokenizer
import logging
import PartitionByCode
import CosineSimilarity
import collections
def find_closest_document(txtMatrixByCode, row):
""" Takes a dictionary of codes to LSA matrices (one per document)
and returns the key for the closest doc based on the mean
cosine similarity (could also use max...)
"""
if len(row) == 0:
return "ERROR"
means_per_code = {}
for doc in txtMatrixByCode.keys():
distance_matrix = txtMatrixByCode[doc]
total = 0.0
for row_to_test in distance_matrix:
sim = CosineSimilarity.cosine_similarity(row, row_to_test)
total += sim
means_per_code[doc] = total / len(distance_matrix)
# first row, first tuple (key)
return sorted(means_per_code.items(), key = lambda item: item[1], reverse = True)[0][0]
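# For reference, cosine_similarity(u, v) = dot(u, v) / (|u| * |v|); the ranking
# above therefore picks the document whose LSA rows are, on average, most
# aligned with `row`.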
def train(num_lsa_topics, k):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#TOKENIZE
xs = SentenceData.SentenceData()
tokenizer = WordTokenizer.WordTokenizer(min_word_count = 5)
tokenized_docs = tokenizer.tokenize(xs.documents)
#MAP TO VECTOR AND SEMANTIC SPACE
tfidf = TfIdf.TfIdf(tokenized_docs)
lsa = Lsa.Lsa(tfidf, num_topics = num_lsa_topics)
full_lsa_matrix = MatrixHelper.gensim_to_python_mdarray(lsa.distance_matrix, num_lsa_topics)
#TODO Partition into Docs by LSA sim
txt_codes = xs.text_codes
    clusters_per_text_code = int(round(k / float(len(txt_codes))))
#Extract the sm code rows from LSA
smCodeRows = ListHelper.filter_list_by_index(full_lsa_matrix, xs.sm_code_indices)
smCodeClassifications = ListHelper.filter_list_by_index(xs.codes_per_document, xs.sm_code_indices)
smCodeCategoryClassifications = ListHelper.filter_list_by_index(xs.categories_per_document, xs.sm_code_indices)
# Dict of <code, list[list]]> - LSA row vectors
logging.info("Partitioning LSA distance_matrix by Source Document")
txtMatrixByCode = PartitionByCode.partition(full_lsa_matrix, xs, xs.text_codes)
closest_docs = [find_closest_document(txtMatrixByCode, row) for row in smCodeRows]
matrix_by_doc = collections.defaultdict(list)
for i, doc in enumerate(closest_docs):
matrix_by_doc[doc].append(smCodeRows[i])
#Stores all cluster labels
logging.info("Clustering within a document")
all_smcode_labels = []
label_offset = 0
for doc in xs.text_codes:
distance_matrix = matrix_by_doc[doc]
#CLUSTER
clusterer = Clusterer.Clusterer(clusters_per_text_code)
labels = clusterer.Run(distance_matrix)
all_smcode_labels = all_smcode_labels + [int(l + label_offset) for l in labels]
label_offset += clusters_per_text_code
#OUTPUT
file_name_code_clusters = "Partition_By_Doc_LSA_SMCODES_k-means_k_{0}_dims_{1}.csv".format(k, num_lsa_topics)
ClustersToFile.clusters_to_file(file_name_code_clusters, all_smcode_labels, smCodeClassifications, "Chicago")
file_name_category_clusters = "Partition_By_Doc_LSA_categories_k-means_k_{0}_dims_{1}.csv".format(k, num_lsa_topics)
ClustersToFile.clusters_to_file(file_name_category_clusters, all_smcode_labels, smCodeCategoryClassifications, "Chicago")
#TODO - filter the category and the docs per docs to the sm codes and output
#file_name_category_clusters = "Partition_By_Doc_LSA_categories_k-means_k_{0}_dims_{1}.txt".format(k, num_lsa_topics)
#ClustersToFile.clusters_to_file(file_name_category_clusters, all_smcode_labels, smCodeClassifications, "Chicago")
print "Finished processing lsa clustering for dims: {0} and k: {1}".format(num_lsa_topics, k)
if __name__ == "__main__":
#k = cluster size
#for k in range(40,41,1): #start, end, increment size
# train(300, k)
train(num_lsa_topics = 300, k = 30)
|
[
"simon.hughes@dice.com"
] |
simon.hughes@dice.com
|
9bb1d9572322f2245a1ddd6e0497df2316d83521
|
0236574468a64e0e2c5b7ae4fec9817d5b35e185
|
/Apple_Packaging_OOP.py
|
a8d8f3e211353d513aeed12fd3e4865ed923877a
|
[] |
no_license
|
SouravKumarSahu/Python
|
07d83503f25e852f251316a0c5e7d2e2875ed418
|
536d209dded03bea56e5baf29130386a02cf902a
|
refs/heads/master
| 2022-12-21T10:28:02.561297
| 2020-09-29T15:21:00
| 2020-09-29T15:21:00
| 260,421,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
import random
class Apple:
def __init__(self):
self.weight = random.uniform(0.2, 0.5)
class Package:
total_no_of_apples = 0
max_weight_per_pack = 300
def __init__(self,pack_num):
self.pack_num = pack_num
self.apples_in_this_pack = 0
self.weight_of_this_pack = 0
    def add_apple(self, apple):
        # Check capacity before adding, so a full pack does not silently
        # absorb the weight of an apple it rejects
        if self.weight_of_this_pack + apple.weight < Package.max_weight_per_pack:
            self.weight_of_this_pack += apple.weight
            self.apples_in_this_pack += 1
            Package.total_no_of_apples += 1
            return False
        else:
            #print(f"package {self.pack_num} is full, use another !!!")
            return True
package_full = True
container = []
i = 0
while Package.total_no_of_apples <= 1000 :
if package_full:
i += 1
new_package = Package(f"package_{i}")
        container.append(new_package)
package_full = False
else:
package_full = new_package.add_apple(Apple())
print(f"Number of packages {len(conatiner)} with total apples {Package.total_no_of_apples}")
for x in conatiner:
print(f"Package: {x.pack_num} contains {x.apples_in_this_pack} apples and weigh around {x.weight_of_this_pack}")
|
[
"noreply@github.com"
] |
noreply@github.com
|
64c89002c3f6155fd306d67c7232d3af1ed40e06
|
a2a8e76b512a5e297af27d2772d2be6545fec4d7
|
/blog/engine/migrations/0008_post_author.py
|
dea94170cf5342f8967f9de5a58be40de0ec3a69
|
[] |
no_license
|
HolidayMan/pinkerblog
|
1564f3942e91702701d92d9f857aa3d6d18d3e83
|
5c2ac85bd2fd5defcdc57cc419fbe0c05fbdc92d
|
refs/heads/master
| 2020-05-27T04:16:54.750920
| 2019-07-26T15:13:58
| 2019-07-26T15:13:58
| 188,479,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# Generated by Django 2.2.1 on 2019-07-24 10:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('engine', '0007_auto_20190723_2124'),
]
operations = [
migrations.AddField(
model_name='post',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='author', to=settings.AUTH_USER_MODEL, verbose_name='Author of post'),
preserve_default=False,
),
]
|
[
"lesha.pitenko791@gmail.com"
] |
lesha.pitenko791@gmail.com
|
f12fe98bcb5094cf6dcb0f1c17276e17ffb58e2d
|
10cc5165ab3a98585476c7182dfe1eafd61d54aa
|
/fast_sort.py
|
d581986b5cf50cb2bfca57b2bd192d1fba06db8c
|
[] |
no_license
|
walrus-coder/sortings
|
ab9d95d1d9062a3740d8d5712e755b3e8251d887
|
2744ff3a2ac00db5bac42013dec6b744a9d44cc3
|
refs/heads/main
| 2023-08-17T06:40:43.382204
| 2021-09-12T11:10:59
| 2021-09-12T11:10:59
| 405,611,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
# Uses python3
import sys
import random
# Place the pivot element in its correct position, along with all elements equal to it
def partition3(a, l, r):
x = a[l]
j = l
m_1 = l
for i in range(l + 1, r + 1):
if a[i] <= x:
j += 1
m_1 += 1
a[i], a[j] = a[j], a[i]
a[j], a[m_1] = a[m_1], a[j]
if a[m_1] == x:
m_1 -= 1
a[l], a[j] = a[j], a[l]
m_1 += 1
return (m_1, j)
# Place the pivot element in its correct position, without special handling of duplicate values
def partition2(a, l, r):
x = a[l]
j = l
for i in range(l + 1, r + 1):
if a[i] <= x:
j += 1
a[i], a[j] = a[j], a[i]
a[l], a[j] = a[j], a[l]
return j
# The sort itself, with a randomly chosen pivot element
def randomized_quick_sort(a, l, r):
if l >= r:
return
k = random.randint(l, r)
a[l], a[k] = a[k], a[l]
#use partition3
m1, m2 = partition3(a, l, r)
    randomized_quick_sort(a, l, m1 - 1)
    randomized_quick_sort(a, m2 + 1, r)
if __name__ == '__main__':
# input = sys.stdin.read()
n = int(input())
    a = list(map(int, input().split()))  # Array elements are entered on one line, separated by spaces
randomized_quick_sort(a, 0, n - 1)
for x in a:
print(x, end=' ')
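# A quick self-check sketch (hypothetical input, no stdin needed):
#   a = [3, 1, 4, 1, 5, 9, 2, 6]
#   randomized_quick_sort(a, 0, len(a) - 1)
#   assert a == [1, 1, 2, 3, 4, 5, 6, 9]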
|
[
"noreply@github.com"
] |
noreply@github.com
|
ceac1df02bd7c8967f0b6f48781922a5e29eb5f6
|
25af9c63d7acf147667c08618e7abf46cab395fa
|
/lista/schemas/item_schema.py
|
e5a90510f8413a5001d986ddbf2e548d4d7eaab2
|
[
"MIT"
] |
permissive
|
ViniciusGarciaSilva/izi-serv-backend
|
82c818466f03542471d99f88efd1e52025ff3beb
|
6b9288b9bf4b20bfe86c291dc321f3e1476f1198
|
refs/heads/master
| 2020-04-09T00:03:35.678223
| 2018-12-14T19:20:32
| 2018-12-14T19:20:32
| 159,851,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
from marshmallow_sqlalchemy import ModelSchema
from marshmallow import fields
from lista.schemas.lista_schema import ListaSchema
from lista.models.item_model import ItemModel
class ItemSchema(ModelSchema):
    listas = fields.Nested("ListaSchema", many=True, exclude=('itens', 'usuario'))
class Meta:
model = ItemModel
|
[
"rogerionakamashi@gmail.com"
] |
rogerionakamashi@gmail.com
|
bbc44f2052056bc7a9d0864ae92dfbc9a46500ca
|
0a3d83e7219ec1440e7af8e0913ce9dd8494b244
|
/run.py
|
b2a1527321e232393d2246292f9539d4220cb4da
|
[] |
no_license
|
kingking888/jdScrapy
|
4a7c5cac2dc6b89b6a5156f035c55b3681eb2db3
|
e512b08a941af6da032cd0763bab49b747f3ba5d
|
refs/heads/master
| 2020-07-26T09:25:49.129791
| 2019-09-11T11:34:04
| 2019-09-11T11:34:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
from scrapy import cmdline
cmdline.execute(["scrapy", "crawl", "ipad"])
|
[
"wuyeyisheng@qq.com"
] |
wuyeyisheng@qq.com
|
00a109d7ceb3af65458a2708817bd3fcbd90c405
|
38f19ae4963df9be7a851458e63ffb94d824eb03
|
/stellar_sdk/__version__.py
|
328854e119791d65c471bc64c026b784c86512b9
|
[
"Apache-2.0"
] |
permissive
|
brunodabo/py-stellar-base
|
e033da687e3a2a76076cfed88e82c7951ae4e57e
|
7897a23bc426324cb389a7cdeb695dfce10a673f
|
refs/heads/master
| 2022-11-05T12:35:37.140735
| 2020-06-03T13:41:30
| 2020-06-03T13:54:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
"""
_____ _______ ______ _ _ _____ _____ _____ _ __
/ ____|__ __| ____| | | | /\ | __ \ / ____| __ \| |/ /
| (___ | | | |__ | | | | / \ | |__) |____| (___ | | | | ' /
\___ \ | | | __| | | | | / /\ \ | _ /______\___ \| | | | <
____) | | | | |____| |____| |____ / ____ \| | \ \ ____) | |__| | . \
|_____/ |_| |______|______|______/_/ \_\_| \_\ |_____/|_____/|_|\_\
"""
__title__ = "stellar-sdk"
__description__ = "The Python Stellar SDK library provides APIs to build transactions and connect to Horizon."
__url__ = "https://github.com/StellarCN/py-stellar-base"
__issues__ = "{}/issues".format(__url__)
__version__ = "2.5.2"
__author__ = "Eno, overcat"
__author_email__ = "appweb.cn@gmail.com, 4catcode@gmail.com"
__license__ = "Apache License 2.0"
|
[
"4catcode@gmail.com"
] |
4catcode@gmail.com
|
c7d65ec8c039170dc15764a64a42a49d41b68ba6
|
c6a10621c8db1f3b3cab2239eb6eefb1049fbb18
|
/src/extractor/test/test_ner.py
|
1992f6436ec3fb55d7dfd8fe75a602efa5dce3c5
|
[] |
no_license
|
SilverHelmet/KnowledgeGraph
|
26386e58f5579699477f7884578475aeeb5fb781
|
07b5318596e4cfa431675c0ffd86a5489c7c23a3
|
refs/heads/master
| 2020-05-21T17:00:12.720936
| 2018-01-30T07:08:26
| 2018-01-30T07:08:26
| 84,635,531
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,179
|
py
|
#encoding: utf-8
from ...IOUtil import data_dir, load_file, rel_ext_dir
from ..entity.ner import NamedEntityReg, NamedEntityPostProcessor
from ..util import load_stanford_result
from ..ltp import LTP
import os
import glob
class Data:
def __init__(self, url, title, sentence):
self.url = url
self.title = title
self.sentence = sentence
self.entities = []
self.bk_urls = []
def add(self, entity_str, bk_url):
self.entities.append(entity_str)
self.bk_urls.append(bk_url)
def read_data_from_file(filepath, datas_map, ignore_miss):
title = None
sentence = None
data = None
datas = []
url = os.path.basename(filepath).split('.')[0]
if url == '塞尔达传说时之笛':
url = '塞尔达传说:时之笛'
    for idx, line in enumerate(open(filepath), start=1):
line = line.rstrip()
if line == "":
continue
if title is None:
title = line
elif line == '$':
title = None
sentence = None
data = None
else:
if line.startswith('\t'):
assert sentence is not None
p = line.strip().split('\t')
assert len(p) == 2
if p[1].strip() == "*" and ignore_miss:
pass
else:
data.add(p[0], p[1])
else:
sentence = line
data = Data(url, title, sentence)
datas.append(data)
datas_map[url] = datas
def read_data(filepath, ignore_miss):
datas_map = {}
for filepath in glob.glob(filepath + '/*tsv'):
read_data_from_file(filepath, datas_map, ignore_miss)
return datas_map
class Estimation:
def __init__(self):
# error + right = total output
# miss + right = total labeled
# miss_partial + miss_nn + miss_seg + miss_other = miss
self.total_output = 0
self.total_labeled = 0
self.right = 0
self.error = 0
self.miss = 0
self.miss_partial = 0
self.miss_nn = 0
self.miss_seg = 0
self.miss_other = 0
    def print_info(self):
        print("total output:\t%d" % self.total_output)
        print("total labeled:\t%d" % self.total_labeled)
        print("right output:\t%d(%f)" % (self.right, (self.right + 0.0) / self.total_output))
        print("error output:\t%d" % self.error)
        print('miss number:\t%d' % self.miss)
        miss = self.miss + 0.0
        f = [self.miss_partial / miss, self.miss_seg / miss, self.miss_nn / miss, self.miss_other / miss]
        f = [round(x, 2) for x in f]
        print("miss type: %d(%f) %d(%f) %d(%f) %d(%f)" % (self.miss_partial, f[0], self.miss_seg, f[1], self.miss_nn, f[2], self.miss_other, f[3]))
class Estimator:
def __init__(self):
self.estimation = Estimation()
def clear(self):
self.estimation = Estimation()
def add(self, ltp_result, entities_name, ner_entities_name, debug = False):
entities_name = [e if type(e) is str else e.encode('utf-8') for e in entities_name]
ner_entities_name = [e if type(e) is str else e.encode('utf-8') for e in ner_entities_name]
self.estimation.total_output += len(ner_entities_name)
self.estimation.total_labeled += len(entities_name)
entities_name_set = set(entities_name)
        if debug:
            # `sentence` refers to the module-level variable set in __main__
            print(sentence)
            print('\t%s' % "\t".join(ner_entities_name))
            print('\t', end='')
for entity in ner_entities_name:
if entity in entities_name_set:
self.estimation.right += 1
else:
self.estimation.error += 1
# miss analysis
miss_type_map = {}
for entity in entities_name:
if entity in ner_entities_name:
continue
if debug:
print "\t%s" %(entity),
self.estimation.miss += 1
if self.check_include(entity, ner_entities_name):
self.estimation.miss_partial += 1
miss_type_map[entity] = 1
continue
st, ed = self.find_pos(ltp_result, entity)
if st == -1:
self.estimation.miss_seg += 1
miss_type_map[entity] = 2
continue
if self.check_noun(ltp_result, st, ed):
self.estimation.miss_nn += 1
miss_type_map[entity] = 3
else:
self.estimation.miss_other += 1
miss_type_map[entity] = 4
if debug:
print ""
return miss_type_map
def check_include(self, entity, reco_entities):
for reco_entity in reco_entities:
if reco_entity.find(entity) != -1 or entity.find(reco_entity) != -1:
return True
return False
def find_pos(self, ltp_result, entity):
length = ltp_result.length
for st in range(length):
for ed in range(st + 1, length + 1):
string = ltp_result.text(st, ed)
if len(string) > len(entity):
break
if string == entity:
return st, ed
return -1, -1
def check_noun(self, ltp_result, st, ed):
noun_tags = set(['n', 'nh', 'ni', 'nl', 'ns', 'nz'])
for tag in ltp_result.tags[st:ed]:
if not tag in noun_tags:
return False
return True
if __name__ == "__main__":
datas_map = read_data(os.path.join(data_dir, '实体标注'), False)
ltp = LTP(None)
est = Estimator()
ner = NamedEntityReg(process_bracket_flag = False, add_time_entity = False)
base_dir = os.path.join(data_dir, '实体标注')
# stf_results_map = load_stanford_result(os.path.join(base_dir, 'sentences.txt'), os.path.join(base_dir, 'sentences_stanf_nlp.json'))
for data_type in datas_map:
datas = datas_map[data_type]
for data in datas:
sentence = data.sentence
ltp_result = ltp.parse(sentence)
# direct result of ltp ner, replace ner_entities_name with your improved result
# ner_entities_name = []
# for idx, ner_tag in enumerate(ltp_result.ner_tags):
# if ner_tag.startswith('S'):
# ner_entities_name.append(ltp_result.text(idx, idx + 1))
# elif ner_tag.startswith('B'):
# st = idx
# elif ner_tag.startswith('E'):
# ner_entities_name.append(ltp_result.text(st, idx + 1))
# stf_result = stf_results_map[sentence]
stf_result = None
str_entities = ner.recognize(sentence, ltp_result, None, stf_result)
ner_entities_name = []
for str_entity in str_entities:
ner_entities_name.append(ltp_result.text(str_entity.st, str_entity.ed))
ner_entities_name.extend(str_entity.extra_names)
est.add(ltp_result, data.entities, ner_entities_name, debug = False)
est.estimation.print_info()
|
[
"lhrshitc@163.com"
] |
lhrshitc@163.com
|
5e637fea629c39692b853e759aa3849c59f89a0c
|
bb14bfe6a27f87e41894e0bb44e1a20663deabb2
|
/Gym_learning/DQN1.py
|
e5093c0cee9a8292c463d5325813fb9a74870f2b
|
[] |
no_license
|
xuejieshougeji0826/Implementation-of-Morvan-Reinforcement-Learning-Teaching-in-Pytorch
|
f359ac0e5633a9941b15c26919c0d5a798730965
|
a41cd19826f8be5118a82963d41da5fa8316a153
|
refs/heads/master
| 2022-04-09T15:23:56.875843
| 2020-03-29T03:52:29
| 2020-03-29T03:52:29
| 238,854,947
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,848
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
np.random.seed(1)
torch.manual_seed(1)
class Net(nn.Module):
def __init__(self,
numbers_actions,
numbers_features,
lr=0.001,
reward_decay=0.9,
epsilon_greedy=0.9,
replace_target_iter=1000,
memory_size=5000,
batch_size=32,
e_greedy_increment=0.001,
):
super(Net, self).__init__()
self.numbers_actions = numbers_actions
self.numbers_features = numbers_features
# print(type(self.numbers_features), type(self.numbers_actions))
self.lr = lr
self.gamma = reward_decay
self.epsilon_greedy = epsilon_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
# self.epsilon_increment = e_greedy_increment
# self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
self.e_greedy_increment=e_greedy_increment
# learning step
self.counter = 0
# numbers means 1. state, 2. action 3. reward 4. state_next
self.memory = np.zeros((self.memory_size, self.numbers_features * 2 + 2))
# two net, target/evaluate net
self.lay_dense = nn.Linear(self.numbers_features, 10, bias=True)
nn.init.normal_(self.lay_dense.weight,mean=0,std=0.3)
nn.init.constant_(self.lay_dense.bias,val=0.1)
self.output = nn.Linear(10, numbers_actions, bias=True)
nn.init.normal_(self.output.weight,mean=0,std=0.3)
nn.init.constant_(self.output.bias,val=0.1)
        # fall back to the fixed greedy threshold when no increment schedule is given
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_greedy
def forward(self, x):
# print("s:",x)
x = F.leaky_relu(self.lay_dense(x))
x = F.leaky_relu(self.output(x))
return x
class Deep_Q_Network(Net):
def __init__(self, numbers_actions, numbers_features, ):
super(Deep_Q_Network, self).__init__(numbers_actions, numbers_features)
self.eval_net = Net(self.numbers_actions, self.numbers_features)
self.target_net = Net(self.numbers_actions, self.numbers_features)
self.memory_counter = 0
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=self.lr)
# self.loss_func = torch.mse_loss()
def choose_action(self, state):
random_number = np.random.uniform()
state = torch.FloatTensor(state)
        if random_number < self.epsilon:  # use the annealed epsilon so e_greedy_increment takes effect
actions_value = self.eval_net(state)
action = torch.argmax(actions_value)
# par = list(self.eval_net.named_parameters())
# print("choose_action_para:",par[0],par[1])
# print(actions_value)
# action = torch.max(actions_value, 1)[1].data.numpy()[0, 0]
# print("1")
# print(action.shape)
action=action.numpy()
# print(action)
# print("ashpe:",action.shape)
else:
action = np.random.randint(0, self.numbers_actions)
# action = torch.tensor(action)
# print(action)
# action=torch.from_numpy(action)
return action
def store_data(self, state, action, reward, state_next):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
transition = np.hstack((state, [action, reward], state_next))
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
# print(self.memory_counter)
self.memory_counter += 1
def learn(self):
# change parameters
if self.counter % self.replace_target_iter == 0:
# self.target_net = self.eval_net.clone()
# par2 = list(self.eval_net.named_parameters())
# print("eval_before:", par2[0], par2[1])
# par2 = list(self.target_net.named_parameters())
# print("target_before:", par2[0], par2[1])
self.target_net.load_state_dict(self.eval_net.state_dict())
print('\ntarget_params_replaced\n')
# par2 = list(self.eval_net.named_parameters())
# print("eval_replaced:", par2[0], par2[1])
# par2 = list(self.target_net.named_parameters())
# print("target_replaced:", par2[0], par2[1])
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
batch_state = torch.FloatTensor(batch_memory[:, :self.numbers_features])
batch_action = torch.FloatTensor(batch_memory[:, self.numbers_features:self.numbers_features + 1].astype(int))
batch_reward = torch.FloatTensor(batch_memory[:, self.numbers_features+1:self.numbers_features + 2].transpose()[0])
# print(batch_reward)
batch_state_next = torch.FloatTensor(batch_memory[:, -self.numbers_features:])
batch_index = np.arange(self.batch_size, dtype=np.int32)
eval_act_index = batch_memory[:, self.numbers_features].astype(int)
# q_evaluate = self.eval_net(batch_state).gather(1,batch_action.long())
q_next= self.target_net(batch_state_next)
q_eval = self.eval_net(batch_state)
q_target = q_eval.clone().detach()
# print(self.numbers_features)
# print("batch_state,",batch_state)
# print("q_target",q_target)
# print("q_target.max(1)",q_next.max(1))
# print("q_target.max(1)[0]",q_next.max(1)[0])
# print("q_target[batch_index, eval_act_index]",q_next[batch_index, eval_act_index])
# print("batch_reward",batch_reward )
# print("self.gamma * q_target.max(1)[0]",self.gamma * q_next.max(1)[0])
# print("batch_reward + self.gamma * q_target.max(1)[0])",batch_reward + self.gamma * q_next.max(1)[0])
q_target[batch_index, eval_act_index] = batch_reward + (self.gamma * q_next.max(1)[0])
# print("q_target", q_target)
# print("q_eval", q_eval)
# loss=q_eval-q_target
# loss=loss.mean()
# print(q_eval-q_target)
        loss = F.mse_loss(q_eval, q_target)
# print("loss:",loss)
# par=list(self.eval_net.named_parameters())
# par2 = list(self.eval_net.named_parameters())
# print("eval_before:", par2[0], par2[1])
# par2 = list(self.target_net.named_parameters())
# print("target_before:", par2[0], par2[1])
# print("eval_before:",par[0],par[1])
self.optimizer.zero_grad()
        loss.backward()
# nn.utils.clip_grad_norm(self.eval_net.parameters(), 10.0)
self.optimizer.step()
par1 = list(self.eval_net.named_parameters())
# print("eval_after:",par1[0],par1[1])
# print("loss:",loss.data.numpy())
# print("sum,",batch_reward.sum().numpy())
self.epsilon = self.epsilon + self.e_greedy_increment if self.epsilon < self.epsilon_greedy else self.epsilon_greedy
self.counter+=1
# self.target_net.load_state_dict(self.eval_net.state_dict())
# par2 = list(self.eval_net.named_parameters())
# print("eval_replaced:", par2[0], par2[1])
# par2 = list(self.target_net.named_parameters())
# print("target_replaced:", par2[0], par2[1])
# print("counter:",self.counter)
        return batch_reward.sum().numpy(), loss.data.numpy()
if __name__ == '__main__':
q_val = Deep_Q_Network(2, 4)
a=np.zeros((1,4))
q_val.choose_action(a)
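# A hedged sketch of how this class is typically driven; the gym-style `env`
# below is an assumption, not part of this file:
#   env = gym.make("CartPole-v0")
#   dqn = Deep_Q_Network(env.action_space.n, env.observation_space.shape[0])
#   state = env.reset()
#   for step in range(10000):
#       action = dqn.choose_action(state)
#       next_state, reward, done, _ = env.step(int(action))
#       dqn.store_data(state, action, reward, next_state)
#       if dqn.memory_counter > dqn.batch_size:
#           dqn.learn()
#       state = env.reset() if done else next_state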
|
[
"lalalander0826@outlook.com"
] |
lalalander0826@outlook.com
|
c5616b1635e59f0282d164e3c82d7a605bd8c817
|
b606b2e8f7b87adf8a2e08165b696db2a8fa743d
|
/common/image_helper.py
|
6d701ff26801230f3656f742142f1dcff6e7440b
|
[] |
no_license
|
sheikhabbas78/practice_8
|
66a68c3ba00314fcb21d3bd8c87180fc316f477a
|
2f32e5cbdea92ae45d862ac8e1814c65fbe0b843
|
refs/heads/master
| 2022-12-17T09:26:16.546306
| 2020-09-21T04:49:17
| 2020-09-21T04:49:17
| 297,230,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
import os
import re
from werkzeug.datastructures import FileStorage
from flask_uploads import IMAGES, UploadSet
from typing import Union
IMAGE_SET = UploadSet("images", IMAGES)
def save_image(image: FileStorage, folder: str = None, filename: str = None) -> str:
    return IMAGE_SET.save(image, folder, filename)
def get_path(filename: str, folder: str) -> str:
return IMAGE_SET.path(filename, folder)
def find_image_any_format(filename: str, folder: str) -> Union[str, None]:
    for _format in IMAGES:
        avatar = f'{filename}.{_format}'
        # look up the candidate filename with this extension, not the bare name
        avatar_path = IMAGE_SET.path(avatar, folder)
        if os.path.isfile(avatar_path):
            return avatar_path
    return None
def _retrieve_filename(file: Union[str, FileStorage]) -> str:
    if isinstance(file, FileStorage):
        return file.filename
    return file
def is_filename_safe(file: Union[str, FileStorage]) -> bool:
    filename = _retrieve_filename(file)
    allowed_format = "|".join(IMAGES)
    # `-` goes at the end of the character class so it is treated literally
    regex = f"^[a-zA-Z0-9][a-zA-Z0-9_().-]*\\.({allowed_format})$"
    return re.match(regex, filename) is not None
def get_basename(file: Union[str, FileStorage]) -> str:
    filename = _retrieve_filename(file)
return os.path.split(filename)[1]
def get_extension(file: Union[str, FileStorage]) -> str:
    filename = _retrieve_filename(file)
return os.path.splitext(filename)[1]
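# A minimal usage sketch (filenames are hypothetical):
#   is_filename_safe("avatar.png")      # True for any format in IMAGES
#   is_filename_safe("../etc/passwd")   # False
#   get_extension("photo.final.jpg")    # ".jpg"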
|
[
"sheikhabbas78@gmail.com"
] |
sheikhabbas78@gmail.com
|
05f09500cb332bb8594080c241eb68ae60f06b4d
|
139f248f790b7a22b7fb04bca9a7bc8b827fa94b
|
/day5.py
|
a008a662a94703664959e28e8e6370caa09ae87e
|
[] |
no_license
|
juneju-darad/HackerRank-30-Days-of-Code
|
63d4ae724a4a50446ef9c3a044a68442c2110391
|
b3bb5cdf9ec26d107362c904f1e748988e446a2c
|
refs/heads/main
| 2023-03-07T15:05:34.533005
| 2021-02-18T05:13:31
| 2021-02-18T05:13:31
| 309,017,753
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input())
if 2 <= n <= 20:
for i in range(1,11):
print('{} x {} = {}'.format(n, i, n*i))
|
[
"djuneju@gmail.com"
] |
djuneju@gmail.com
|
867c39b81f0bd2f14694cd585a733a351b7c50fa
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_spinier.py
|
0482c9921fbb9ca2d0ffe46b31b1181f2f50e5f1
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
from xai.brain.wordbase.adjectives._spiny import _SPINY
# class header
class _SPINIER(_SPINY, ):
def __init__(self,):
_SPINY.__init__(self)
self.name = "SPINIER"
self.specie = 'adjectives'
self.basic = "spiny"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
cc30b966fa79889e04d7cbb5eb68f98d897e982b
|
94ea6ef2135e3914f772d08dd1f429e876896225
|
/transition.py
|
17490cf0bda5ae4f51e65fe2995e84eba8e717d3
|
[] |
no_license
|
Ogiwara-CostlierRain464/DQN-performance-check
|
27de94186a53bd2f9b5880a2abcdca2571c6cac2
|
6940163003c3e9c2d7e6467e5ebfe62d59092925
|
refs/heads/master
| 2020-12-02T03:53:59.688141
| 2020-01-07T13:10:10
| 2020-01-07T13:10:10
| 230,879,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
from collections import namedtuple
Transition = namedtuple(
"Transition", ("state", "action", "next_state", "reward")
)
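# A minimal usage sketch with made-up values:
#   t = Transition(state=[0.0, 1.0], action=1, next_state=[0.5, 0.5], reward=1.0)
#   t.reward                 # 1.0
#   t._replace(reward=0.0)   # namedtuples are immutable; _replace returns a copy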
|
[
"yushiogiwara@gmail.com"
] |
yushiogiwara@gmail.com
|
238ec91f069f7201b85bb750838f5ebd9b18ecd9
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/pytest-labs/.venv/lib/python3.6/site-packages/facebook_business/adobjects/productdaeventsamplesbatch.py
|
6155974b1405f7c8bc729960a751f6b815d8bc6d
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class ProductDaEventSamplesBatch(
AbstractObject,
):
def __init__(self, api=None):
super(ProductDaEventSamplesBatch, self).__init__()
self._isProductDaEventSamplesBatch = True
self._api = api
class Field(AbstractObject.Field):
samples = 'samples'
time_start = 'time_start'
time_stop = 'time_stop'
class AggregationType:
content_id = 'CONTENT_ID'
content_url = 'CONTENT_URL'
class Event:
viewcontent = 'ViewContent'
addtocart = 'AddToCart'
purchase = 'Purchase'
initiatecheckout = 'InitiateCheckout'
search = 'Search'
lead = 'Lead'
addtowishlist = 'AddToWishlist'
_field_types = {
'samples': 'list<Object>',
'time_start': 'unsigned int',
'time_stop': 'unsigned int',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['AggregationType'] = ProductDaEventSamplesBatch.AggregationType.__dict__.values()
field_enum_info['Event'] = ProductDaEventSamplesBatch.Event.__dict__.values()
return field_enum_info
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
646f8eb9eed677041d3d32e06c1b5de1a4ef703b
|
4de7e40715e6a7f9ff22b66146dbe0a627db2e76
|
/ellipse-fit-evaluation/python/tests/test_evaluate_ellipse_fit.py
|
6aaf1234ef557c2c9c0ce614124966130c51410c
|
[] |
no_license
|
hackkosice/marathon-thermofisher-challenge
|
9cd4f208af73bee096b50b9b895ccffafef15af0
|
c8300083094b49a70284dbe5d728ef6e9f2404b6
|
refs/heads/master
| 2023-04-16T20:31:44.443945
| 2021-04-24T00:26:39
| 2021-04-24T00:26:39
| 361,029,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,140
|
py
|
import unittest
from ellipsefitevaluation import evaluate_ellipse_fit
class TestEvaluateEllipseFit(unittest.TestCase):
def test_perfect_fit_1(self):
fit_ellipse = {'center': (626.76, 494.98), 'axes': (387.96, 381.45), 'angle': 170}
score = evaluate_ellipse_fit('2018-02-15 17.26.47.474000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertEqual(score, 1.0)
def test_perfect_fit_2(self):
fit_ellipse = {'center': (635.86, 521.4), 'axes': (168.05, 165.09), 'angle': 164}
score = evaluate_ellipse_fit('2018-02-15 17.27.27.162000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertEqual(score, 1.0)
def test_perfect_fit_3(self):
fit_ellipse = {'center': (847.58, 751.44), 'axes': (33.93, 30.67), 'angle': 18}
score = evaluate_ellipse_fit('2018-02-15 17.27.54.680000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertEqual(score, 1.0)
def test_partial_fit_1(self):
fit_ellipse = {'center': (630.0, 500.0), 'axes': (390.0, 380.0), 'angle': 170}
score = evaluate_ellipse_fit('2018-02-15 17.26.47.474000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertAlmostEqual(score, 0.9909615)
def test_partial_fit_2(self):
fit_ellipse = {'center': (600.0, 500.0), 'axes': (100.0, 100.0), 'angle': 200}
score = evaluate_ellipse_fit('2018-02-15 17.27.27.162000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertAlmostEqual(score, 0.3624268)
def test_partial_fit_3(self):
fit_ellipse = {'center': (900.0, 800.0), 'axes': (100.0, 100.0), 'angle': 0}
score = evaluate_ellipse_fit('2018-02-15 17.27.54.680000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertAlmostEqual(score, 0.1081905)
def test_invalid_fit(self):
fit_ellipse = {'center': (200, 100), 'axes': (50, 25), 'angle': 0}
score = evaluate_ellipse_fit('2018-02-15 17.26.47.474000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertEqual(score, 0.0)
def test_nonempty_fit_with_empty_gt(self):
fit_ellipse = {'center': (200, 100), 'axes': (50, 25), 'angle': 0}
score = evaluate_ellipse_fit('2018-02-15 17.36.17.793000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertEqual(score, 0.0)
def test_empty_fit_with_empty_gt(self):
fit_ellipse = None
score = evaluate_ellipse_fit('2018-02-15 17.36.17.793000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertEqual(score, 1.0)
def test_empty_fit_with_nonempty_gt(self):
fit_ellipse = None
score = evaluate_ellipse_fit('2018-02-15 17.26.47.474000.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
self.assertEqual(score, 0.0)
def test_invalid_tiff_filename(self):
with self.assertRaises(ValueError):
fit_ellipse = {'center': (200, 100), 'axes': (50, 25), 'angle': 0}
score = evaluate_ellipse_fit('nonexisting.tiff', fit_ellipse,
csv_filepath='./tests/ground_truths_train.csv')
def test_invalid_csv_filename(self):
with self.assertRaises(FileNotFoundError):
fit_ellipse = {'center': (200, 100), 'axes': (50, 25), 'angle': 0}
score = evaluate_ellipse_fit('2018-02-15 17.26.47.474000.tiff', fit_ellipse,
csv_filepath='./tests/nonexisting.csv')
if __name__ == "__main__":
unittest.main()
|
[
"micko.juraj64@gmail.com"
] |
micko.juraj64@gmail.com
|
72fca8dcd312636b3ac3e7e1c1c4e4f6b348449d
|
c22f50d08d858bcb72e2a84851c21c490a4f5d21
|
/poll/forms.py
|
bfc87f9f1e22b4aa1a29870e3b6bb5544c775656
|
[] |
no_license
|
ragnarok22/airport
|
cb4ab09f8d836b568c927ad38534b60263bc83a4
|
4edfcdf38923c7c6bcb96ec6df0f806449810176
|
refs/heads/master
| 2021-01-20T03:29:53.229684
| 2017-05-22T13:00:01
| 2017-05-22T13:00:01
| 89,547,824
| 1
| 0
| null | 2017-05-09T02:00:36
| 2017-04-27T02:39:48
|
Python
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
from django import forms
from poll.models import NationalPassengerPoll, InternationalPassengerPoll, AirLineRepresentPoll
class NationalPassengerForm(forms.ModelForm):
class Meta:
model = NationalPassengerPoll
fields = '__all__'
widgets = {
'date': forms.DateInput(attrs={'class': 'datepicker'})
}
class InternationalPassengerForm(forms.ModelForm):
class Meta:
model = InternationalPassengerPoll
fields = '__all__'
widgets = {
'date_out': forms.DateInput(attrs={'class': 'datepicker'})
}
class AirLineRepresentForm(forms.ModelForm):
class Meta:
model = AirLineRepresentPoll
fields = '__all__'
widgets = {
'date': forms.DateInput(attrs={'class': 'datepicker'})
}
|
[
"rhernandeza@uho.edu.cu"
] |
rhernandeza@uho.edu.cu
|
9462483fec28c52030cc7e091d8e536f31f1b697
|
5b1116369c5a003e0a70506f14cf05c6fa79e76d
|
/quentinsblog/urls.py
|
17210bfbc53e04fe1fb9e35487df3437f1dab9f3
|
[
"MIT"
] |
permissive
|
VonStruddle/django-blog
|
a6619ddfbf615304e84acc8cfd555461a18a110e
|
7abba159fa0bb78094fa353e12e9cd523fdb30d8
|
refs/heads/master
| 2021-06-25T14:02:07.441496
| 2017-09-13T20:14:16
| 2017-09-13T20:14:16
| 102,965,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
"""quentinsblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from posts import views as posts_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', posts_views.home, name='home'),
url(r'^post/(?P<post_slug>[a-z0-9]+(?:-[a-z0-9]+)*)/$',
posts_views.post_detail, name='post_detail'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"quentin.durantay@gmail.com"
] |
quentin.durantay@gmail.com
|
5440c399fc472d9aec6c0adc203267a050e8c7d0
|
747febe786dd6b7fd6c63cfe73dbe3023354daa8
|
/src/the_tale/the_tale/game/quests/logic.py
|
69a11b9a515ce9d1b4452b825829a94900ea8fa3
|
[
"BSD-3-Clause"
] |
permissive
|
the-tale/the-tale
|
4e4b8d91dc873a5fb935fe58e9721a877baa6d3f
|
e8450bd2332344da805b1851e728da5a3e5bf0ef
|
refs/heads/develop
| 2023-08-01T13:53:46.835667
| 2022-12-25T18:04:56
| 2022-12-25T18:04:56
| 1,949,167
| 98
| 52
|
BSD-3-Clause
| 2023-02-15T18:57:33
| 2011-06-24T18:49:48
|
Python
|
UTF-8
|
Python
| false
| false
| 27,529
|
py
|
import smart_imports
smart_imports.all()
WORLD_RESTRICTIONS = [questgen_restrictions.SingleLocationForObject(),
questgen_restrictions.ReferencesIntegrity()]
QUEST_RESTRICTIONS = [questgen_restrictions.SingleStartStateWithNoEnters(),
questgen_restrictions.FinishStateExists(),
questgen_restrictions.AllStatesHasJumps(),
questgen_restrictions.ConnectedStateJumpGraph(),
questgen_restrictions.NoCirclesInStateJumpGraph(),
questgen_restrictions.MultipleJumpsFromNormalState(),
questgen_restrictions.ChoicesConsistency(),
questgen_restrictions.QuestionsConsistency(),
questgen_restrictions.FinishResultsConsistency()]
QUESTS_BASE = questgen_quests_quests_base.QuestsBase()
QUESTS_BASE += [quest.quest_class for quest in relations.QUESTS.records]
class HeroQuestInfo(object):
__slots__ = ('id',
'level',
'position_place_id',
'is_first_quest_path_required',
'preferences_mob_id',
'preferences_place_id',
'preferences_friend_id',
'preferences_enemy_id',
'preferences_equipment_slot',
'preferences_quests_region_id',
'preferences_quests_region_size',
'interfered_persons',
'quests_priorities',
'excluded_quests',
'prefered_quest_markers')
def __init__(self,
id,
level,
position_place_id,
is_first_quest_path_required,
preferences_mob_id,
preferences_place_id,
preferences_friend_id,
preferences_enemy_id,
preferences_equipment_slot,
preferences_quests_region_id,
preferences_quests_region_size,
interfered_persons,
quests_priorities,
excluded_quests,
prefered_quest_markers):
self.id = id
self.level = level
self.position_place_id = position_place_id
self.is_first_quest_path_required = is_first_quest_path_required
self.preferences_mob_id = preferences_mob_id
self.preferences_place_id = preferences_place_id
self.preferences_friend_id = preferences_friend_id
self.preferences_enemy_id = preferences_enemy_id
self.preferences_equipment_slot = preferences_equipment_slot
self.preferences_quests_region_id = preferences_quests_region_id
self.preferences_quests_region_size = preferences_quests_region_size
self.interfered_persons = interfered_persons
self.quests_priorities = quests_priorities
self.excluded_quests = excluded_quests
self.prefered_quest_markers = prefered_quest_markers
@property
def position_place(self):
return places_storage.places[self.position_place_id]
def serialize(self):
return {'id': self.id,
'level': self.level,
'position_place_id': self.position_place_id,
'is_first_quest_path_required': self.is_first_quest_path_required,
'preferences_mob_id': self.preferences_mob_id,
'preferences_place_id': self.preferences_place_id,
'preferences_friend_id': self.preferences_friend_id,
'preferences_enemy_id': self.preferences_enemy_id,
'preferences_equipment_slot': self.preferences_equipment_slot.value if self.preferences_equipment_slot else None,
'preferences_quests_region_id': self.preferences_quests_region_id if self.preferences_quests_region_id else None,
'preferences_quests_region_size': self.preferences_quests_region_size,
'interfered_persons': self.interfered_persons,
'quests_priorities': [(quest_type.value, priority) for quest_type, priority in self.quests_priorities],
'excluded_quests': list(sorted(self.excluded_quests)),
'prefered_quest_markers': list(sorted(self.prefered_quest_markers))}
@classmethod
def deserialize(cls, data):
return cls(id=data['id'],
level=data['level'],
position_place_id=data['position_place_id'],
is_first_quest_path_required=data['is_first_quest_path_required'],
preferences_mob_id=data['preferences_mob_id'],
preferences_place_id=data['preferences_place_id'],
preferences_friend_id=data['preferences_friend_id'],
preferences_enemy_id=data['preferences_enemy_id'],
preferences_equipment_slot=heroes_relations.EQUIPMENT_SLOT(data['preferences_equipment_slot']) if data['preferences_equipment_slot'] is not None else None,
preferences_quests_region_id=data['preferences_quests_region_id'],
preferences_quests_region_size=data['preferences_quests_region_size'],
interfered_persons=data['interfered_persons'],
quests_priorities=[(relations.QUESTS(quest_type), priority) for quest_type, priority in data['quests_priorities']],
excluded_quests=set(data['excluded_quests']),
prefered_quest_markers=set(data['prefered_quest_markers']))
def __eq__(self, other):
return self.serialize() == other.serialize()
    def __ne__(self, other):
return not self.__eq__(other)
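    # A hedged roundtrip sketch: serialize/deserialize are designed as inverses,
    # so for any populated `info` instance the following should hold:
    #   assert HeroQuestInfo.deserialize(info.serialize()) == info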
def choose_quest_path_url():
return utils_urls.url('game:quests:api-choose', api_version='1.0', api_client=django_settings.API_CLIENT)
def fact_place(place):
return questgen_facts.Place(uid=uids.place(place.id),
terrains=[terrain.value for terrain in map_storage.cells.place_terrains(place.id)],
externals={'id': place.id},
type=place.modifier_quest_type())
def fact_mob(mob):
return questgen_facts.Mob(uid=uids.mob(mob.id),
terrains=[terrain.value for terrain in mob.terrains],
externals={'id': mob.id})
def fact_person(person):
return questgen_facts.Person(uid=uids.person(person.id),
profession=person.type.quest_profession,
externals={'id': person.id,
'type': game_relations.ACTOR.PERSON.value})
def fact_emissary(emissary):
return questgen_facts.Person(uid=uids.emissary(emissary.id),
profession=None,
externals={'id': emissary.id,
'type': game_relations.ACTOR.EMISSARY.value})
def fact_social_connection(connection_type, person_uid, connected_person_uid):
return questgen_facts.SocialConnection(person_to=person_uid,
person_from=connected_person_uid,
type=connection_type.questgen_type)
def fact_located_in(person):
return questgen_facts.LocatedIn(object=uids.person(person.id), place=uids.place(person.place.id))
def fill_places(kb, places):
for place in places:
uid = uids.place(place.id)
if uid in kb:
continue
kb += fact_place(place)
def setup_places(kb, hero_info):
center_place_id = hero_info.position_place_id
quests_region_size = hero_info.preferences_quests_region_size
if hero_info.preferences_quests_region_id is not None:
center_place_id = hero_info.preferences_quests_region_id
if hero_info.is_first_quest_path_required:
quests_region_size = 2
places = places_storage.places.nearest_places(center_place_id,
number=quests_region_size)
if len(places) < 2:
places = places_storage.places.all()
fill_places(kb, places)
hero_position_uid = uids.place(hero_info.position_place_id)
if hero_position_uid not in kb:
kb += fact_place(places_storage.places[hero_info.position_place_id])
kb += questgen_facts.LocatedIn(object=uids.hero(hero_info.id), place=hero_position_uid)
def setup_person(kb, person):
if uids.place(person.place.id) not in kb:
kb += fact_place(person.place)
person_uid = uids.person(person.id)
if person_uid in kb:
return kb[person_uid]
f_person = fact_person(person)
kb += f_person
kb += fact_located_in(person)
return f_person
def setup_persons(kb, hero_info):
for person in persons_storage.persons.all():
if uids.place(person.place.id) not in kb:
continue
setup_person(kb, person)
def setup_social_connections(kb):
persons_in_kb = {f_person.externals['id']: f_person.uid
for f_person in kb.filter(questgen_facts.Person)
if f_person.externals['type'] == game_relations.ACTOR.PERSON.value}
for person_id, person_uid in persons_in_kb.items():
person = persons_storage.persons[person_id]
for connection_type, connected_person_id in persons_storage.social_connections.get_person_connections(person):
if connected_person_id not in persons_in_kb:
continue
kb += fact_social_connection(connection_type, person_uid, persons_in_kb[connected_person_id])
def setup_preferences(kb, hero_info):
hero_uid = uids.hero(hero_info.id)
if hero_info.preferences_mob_id is not None:
f_mob = fact_mob(mobs_storage.mobs[hero_info.preferences_mob_id])
if f_mob.uid not in kb:
kb += f_mob
kb += questgen_facts.PreferenceMob(object=hero_uid, mob=f_mob.uid)
if hero_info.preferences_place_id is not None:
f_place = fact_place(places_storage.places[hero_info.preferences_place_id])
if f_place.uid not in kb:
kb += f_place
kb += questgen_facts.PreferenceHometown(object=hero_uid, place=f_place.uid)
if hero_info.preferences_friend_id is not None:
friend = persons_storage.persons[hero_info.preferences_friend_id]
f_person = setup_person(kb, friend)
kb += questgen_facts.PreferenceFriend(object=hero_uid, person=f_person.uid)
kb += questgen_facts.ExceptBadBranches(object=f_person.uid)
if hero_info.preferences_enemy_id:
enemy = persons_storage.persons[hero_info.preferences_enemy_id]
f_person = setup_person(kb, enemy)
kb += questgen_facts.PreferenceEnemy(object=hero_uid, person=f_person.uid)
kb += questgen_facts.ExceptGoodBranches(object=f_person.uid)
if hero_info.preferences_equipment_slot:
kb += questgen_facts.PreferenceEquipmentSlot(object=hero_uid, equipment_slot=hero_info.preferences_equipment_slot.value)
def get_knowledge_base(hero_info, without_restrictions=False): # pylint: disable=R0912
kb = questgen_knowledge_base.KnowledgeBase()
hero_uid = uids.hero(hero_info.id)
kb += questgen_facts.Hero(uid=hero_uid, externals={'id': hero_info.id})
setup_places(kb, hero_info)
setup_persons(kb, hero_info)
setup_preferences(kb, hero_info)
setup_social_connections(kb)
if not without_restrictions:
for person in persons_storage.persons.all():
if person.place.id == hero_info.position_place_id and person.id in hero_info.interfered_persons:
kb += questgen_facts.NotFirstInitiator(person=uids.person(person.id))
kb.validate_consistency(WORLD_RESTRICTIONS)
kb += [questgen_facts.UpgradeEquipmentCost(money=prototypes.QuestPrototype.upgrade_equipment_cost(hero_info))]
return kb
def create_random_quest_for_hero(hero_info, logger):
constructor = place_quest_constructor_fabric(place=hero_info.position_place,
person_action=None)
return create_random_quest_with_constructor(hero_info,
constructor,
logger,
excluded_quests=hero_info.excluded_quests,
no_restrictions_on_fail=True)
def create_random_quest_for_place(hero_info, place, person_action, logger):
constructor = place_quest_constructor_fabric(place=place,
person_action=person_action)
excluded_quests = [record.quest_class.TYPE
for record in relations.QUESTS.records
if not record.allowed_for_cards]
return create_random_quest_with_constructor(hero_info,
constructor,
logger,
excluded_quests=excluded_quests,
no_restrictions_on_fail=False)
def create_random_quest_for_person(hero_info, person, person_action, logger):
constructor = person_quest_constructor_fabric(person=person,
person_action=person_action)
excluded_quests = [record.quest_class.TYPE
for record in relations.QUESTS.records
if not record.allowed_for_cards]
return create_random_quest_with_constructor(hero_info,
constructor,
logger,
excluded_quests=excluded_quests,
no_restrictions_on_fail=False)
def create_random_quest_for_emissary(hero_info, emissary, person_action, logger):
constructor = emissary_quest_constructor_fabric(emissary=emissary,
person_action=person_action)
excluded_quests = [record.quest_class.TYPE
for record in relations.QUESTS.records
if not record.allowed_for_cards]
return create_random_quest_with_constructor(hero_info,
constructor,
logger,
excluded_quests=excluded_quests,
no_restrictions_on_fail=False)
def create_random_quest_with_constructor(hero_info, constructor, logger, excluded_quests, no_restrictions_on_fail):
start_time = time.time()
normal_mode = True
quests = utils_logic.shuffle_values_by_priority(hero_info.quests_priorities)
logger.info('hero[%(hero_id).6d]: try is_normal: %(is_normal)s (allowed: %(allowed)s) (excluded: %(excluded)s)' %
{'hero_id': hero_info.id,
'is_normal': normal_mode,
'allowed': ', '.join(quest.quest_class.TYPE for quest in quests),
'excluded': ', '.join(excluded_quests)})
quest_type, knowledge_base = try_to_create_random_quest_for_hero(hero_info,
quests,
excluded_quests,
without_restrictions=False,
constructor=constructor,
logger=logger)
if knowledge_base is None and no_restrictions_on_fail:
logger.info('hero[%(hero_id).6d]: first try failed' % {'hero_id': hero_info.id})
normal_mode = False
quest_type, knowledge_base = try_to_create_random_quest_for_hero(hero_info,
quests,
excluded_quests=[],
without_restrictions=True,
constructor=constructor,
logger=logger)
spent_time = time.time() - start_time
logger.info('hero[%(hero_id).6d]: %(spent_time)s is_normal: %(is_normal)s %(quest_type)20s (allowed: %(allowed)s) (excluded: %(excluded)s)' %
{'hero_id': hero_info.id,
'spent_time': spent_time,
'is_normal': normal_mode,
'quest_type': quest_type,
'allowed': ', '.join(quest.quest_class.TYPE for quest in quests),
'excluded': ', '.join(excluded_quests)})
return knowledge_base
def try_to_create_random_quest_for_hero(hero_info, quests, excluded_quests, without_restrictions, constructor, logger):
for quest_type in quests:
if quest_type.quest_class.TYPE in excluded_quests:
continue
try:
return quest_type, _create_random_quest_for_hero(hero_info,
constructor=constructor,
start_quests=[quest_type.quest_class.TYPE],
without_restrictions=without_restrictions)
except questgen_exceptions.RollBackError as e:
logger.info('hero[%(hero_id).6d]: can not create quest <%(quest_type)s>: %(exception)s' %
{'hero_id': hero_info.id,
'quest_type': quest_type,
'exception': e})
continue
return None, None
@utils_decorators.retry_on_exception(max_retries=conf.settings.MAX_QUEST_GENERATION_RETRIES,
exceptions=[questgen_exceptions.RollBackError])
def _create_random_quest_for_hero(hero_info, constructor, start_quests, without_restrictions=False):
knowledge_base = get_knowledge_base(hero_info, without_restrictions=without_restrictions)
selector = questgen_selectors.Selector(knowledge_base, QUESTS_BASE, social_connection_probability=0)
knowledge_base += constructor(selector, start_quests)
questgen_transformators.activate_events(knowledge_base) # TODO: after remove restricted states
questgen_transformators.remove_restricted_states(knowledge_base)
questgen_transformators.remove_broken_states(knowledge_base) # MUST be called after all graph changes
questgen_transformators.determine_default_choices(knowledge_base, preferred_markers=hero_info.prefered_quest_markers) # MUST be called after all graph changes and on valid graph
questgen_transformators.remove_unused_actors(knowledge_base)
knowledge_base.validate_consistency(WORLD_RESTRICTIONS)
knowledge_base.validate_consistency(QUEST_RESTRICTIONS)
return knowledge_base
def place_quest_constructor_fabric(place, person_action):
def constructor(selector, start_quests):
f_place = fact_place(place)
if f_place.uid not in selector._kb:
selector._kb += f_place
if person_action is not None:
if person_action.is_HELP:
selector._kb += questgen_facts.OnlyGoodBranches(object=f_place.uid)
elif person_action.is_HARM:
selector._kb += questgen_facts.OnlyBadBranches(object=f_place.uid)
for person in place.persons:
f_person = setup_person(selector._kb, person)
if person_action.is_HELP:
remove_help_restrictions(selector._kb, f_person.uid, f_place.uid)
selector._kb += questgen_facts.OnlyGoodBranches(object=f_person.uid)
elif person_action.is_HARM:
remove_harm_restrictions(selector._kb, f_person.uid, f_place.uid)
selector._kb += questgen_facts.OnlyBadBranches(object=f_person.uid)
selector.reserve(f_place)
return selector.create_quest_from_place(nesting=0,
initiator_position=f_place,
allowed=start_quests,
excluded=[],
tags=('can_start', ))
return constructor
def emissary_quest_constructor_fabric(emissary, person_action):
def constructor(selector, start_quests):
f_emissary = fact_emissary(emissary)
f_emissary_place = fact_place(emissary.place)
selector._kb += f_emissary
selector._kb += questgen_facts.LocatedIn(object=f_emissary.uid, place=uids.place(emissary.place_id))
if f_emissary_place.uid not in selector._kb:
selector._kb += f_emissary_place
if person_action.is_HELP:
remove_help_restrictions(selector._kb, f_emissary.uid, f_emissary_place.uid)
selector._kb += questgen_facts.OnlyGoodBranches(object=f_emissary.uid)
elif person_action.is_HARM:
remove_harm_restrictions(selector._kb, f_emissary.uid, f_emissary_place.uid)
selector._kb += questgen_facts.OnlyBadBranches(object=f_emissary.uid)
else:
raise NotImplementedError
selector.reserve(f_emissary)
selector.reserve(f_emissary_place)
return selector.create_quest_from_person(nesting=0,
initiator=f_emissary,
allowed=start_quests,
excluded=[],
tags=('can_start', ))
return constructor
def remove_restrictions(kb, Fact, object_uid):
to_remove = []
for fact in kb.filter(Fact):
if fact.object == object_uid:
to_remove.append(fact)
kb -= to_remove
def remove_help_restrictions(kb, person_uid, place_uid):
remove_restrictions(kb, questgen_facts.OnlyBadBranches, place_uid)
remove_restrictions(kb, questgen_facts.ExceptGoodBranches, place_uid)
remove_restrictions(kb, questgen_facts.OnlyBadBranches, person_uid)
remove_restrictions(kb, questgen_facts.ExceptGoodBranches, person_uid)
def remove_harm_restrictions(kb, person_uid, place_uid):
remove_restrictions(kb, questgen_facts.OnlyGoodBranches, place_uid)
remove_restrictions(kb, questgen_facts.ExceptBadBranches, place_uid)
remove_restrictions(kb, questgen_facts.OnlyGoodBranches, person_uid)
remove_restrictions(kb, questgen_facts.ExceptBadBranches, person_uid)
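# A hedged sketch of the pattern above: remove_restrictions() drops every
# branch-restriction fact of the given class that targets object_uid, so the
# help/harm helpers can then pin the opposite-branch facts without conflicts.
# For example (uids hypothetical):
#
#   remove_help_restrictions(kb, person_uid='person_1', place_uid='place_1')
#   kb += questgen_facts.OnlyGoodBranches(object='person_1')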
def person_quest_constructor_fabric(person, person_action):
def constructor(selector, start_quests):
place_uid = uids.place(person.place_id)
f_person = setup_person(selector._kb, person)
if person_action.is_HELP:
remove_help_restrictions(selector._kb, f_person.uid, place_uid)
selector._kb += questgen_facts.OnlyGoodBranches(object=f_person.uid)
elif person_action.is_HARM:
remove_harm_restrictions(selector._kb, f_person.uid, place_uid)
selector._kb += questgen_facts.OnlyBadBranches(object=f_person.uid)
else:
raise NotImplementedError
selector.reserve(f_person)
selector.reserve(selector._kb[place_uid])
return selector.create_quest_from_person(nesting=0,
initiator=f_person,
allowed=start_quests,
excluded=[],
tags=('can_start', ))
return constructor
def create_hero_info(hero):
quests_priorities = hero.get_quests_priorities()
return HeroQuestInfo(id=hero.id,
level=hero.level,
position_place_id=hero.position.cell().nearest_place_id,
is_first_quest_path_required=hero.is_first_quest_path_required,
preferences_mob_id=hero.preferences.mob.id if hero.preferences.mob else None,
preferences_place_id=hero.preferences.place.id if hero.preferences.place else None,
preferences_friend_id=hero.preferences.friend.id if hero.preferences.friend else None,
preferences_enemy_id=hero.preferences.enemy.id if hero.preferences.enemy else None,
preferences_equipment_slot=hero.preferences.equipment_slot,
preferences_quests_region_id=hero.preferences.quests_region.id if hero.preferences.quests_region else None,
preferences_quests_region_size=hero.preferences.quests_region_size,
interfered_persons=hero.quests.get_interfered_persons(),
quests_priorities=quests_priorities,
excluded_quests=hero.quests.excluded_quests(len(quests_priorities) // 2),
prefered_quest_markers=hero.prefered_quest_markers())
def request_quest_for_hero(hero, emissary_id=None, place_id=None, person_id=None, person_action=None):
hero_info = create_hero_info(hero)
amqp_environment.environment.workers.quests_generator.cmd_request_quest(hero.account_id,
hero_info.serialize(),
emissary_id=emissary_id,
place_id=place_id,
person_id=person_id,
person_action=person_action)
def setup_quest_for_hero(hero, knowledge_base_data):
    # do nothing if the hero already has a quest
if not hero.actions.current_action.searching_quest:
return
knowledge_base = questgen_knowledge_base.KnowledgeBase.deserialize(knowledge_base_data, fact_classes=questgen_facts.FACTS)
states_to_percents = questgen_analysers.percents_collector(knowledge_base)
quest = prototypes.QuestPrototype(hero=hero, knowledge_base=knowledge_base, states_to_percents=states_to_percents)
    # set up the quest before it starts,
    # so that it is correctly recorded in the action stack
hero.actions.current_action.setup_quest(quest)
if quest.machine.can_do_step():
quest.machine.step() # do first step to setup pointer
        # force the hero to satisfy the requirements of the quest's start node;
        # needed when the quest is initiated by the player and the hero is not at the quest's starting point
quest.machine.check_requirements(quest.machine.current_state)
quest.machine.satisfy_requirements(quest.machine.current_state)
def extract_person_type(fact):
return game_relations.ACTOR(fact.externals.get('type', game_relations.ACTOR.PERSON.value))
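# Example (sketch): for a fact without an explicit 'type' external,
# extract_person_type() falls back to the PERSON actor type:
#
#   extract_person_type(fact)  # -> game_relations.ACTOR.PERSON when 'type' is absent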
|
[
"a.eletsky@gmail.com"
] |
a.eletsky@gmail.com
|
a1b913115e78d26d413641084dee0f60d1264998
|
78fcc63ad88ae14ba3f8698ea56c3e3b4090b853
|
/aysha/__init__.py
|
7ac486279be912d587b2323a257eb32250dd168c
|
[] |
no_license
|
teou/aysha
|
e57fb7e6b148d632b52fb1fe63660e7db957f8d8
|
6af1d960cb40679b7102a7e38f35723e4ec65ebc
|
refs/heads/master
| 2020-06-07T10:15:15.025302
| 2014-08-14T03:11:08
| 2014-08-14T03:11:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
__author__ = 'liuhuo'
|
[
"huoliu@gmail.com"
] |
huoliu@gmail.com
|
8616afca261d23bc6fc4eb7c79ee623cc24d4792
|
24332e4890694bffde4eb59954d88de44f4f9f36
|
/lib/__init__.py
|
92d03a5b534222e90dbb8ae3461a38bbf6bf3919
|
[] |
no_license
|
muzidudu/savevid
|
40523d5be27079018f390e5b06ff20575459bcee
|
93402e81d2a2c8d4ccf196dbef34186962caa163
|
refs/heads/master
| 2021-05-30T11:28:21.067445
| 2016-01-19T05:42:00
| 2016-01-19T05:42:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27
|
py
|
__all__ = ["site_factory"]
|
[
"zqshen.1985@gmail.com"
] |
zqshen.1985@gmail.com
|
4637ad8e57ec88e45fda29f4a08e4b0144d0f669
|
f0e11aeb7b5bd96c828cf39728eb2fa523f320df
|
/snapflow/migrations/versions/7d5638b5d74d_initial_migration.py
|
8b85094b11b08e55368d3320bb0b4bdb56eecc13
|
[
"BSD-3-Clause"
] |
permissive
|
sathya-reddy-m/snapflow
|
7bc1fa7de7fd93b81e5b0538ba73ca68e9e109db
|
9e9e73f0d5a3d6b92f528ef1e2840ad92582502e
|
refs/heads/master
| 2023-05-01T05:14:08.479073
| 2021-05-21T00:14:56
| 2021-05-21T00:14:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,528
|
py
|
"""Initial migration
Revision ID: 7d5638b5d74d
Revises:
Create Date: 2021-05-17 20:55:42.613348
"""
import snapflow
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "7d5638b5d74d"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"_snapflow_data_block_metadata",
sa.Column("env_id", sa.String(length=64), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("inferred_schema_key", sa.String(length=128), nullable=True),
sa.Column("nominal_schema_key", sa.String(length=128), nullable=True),
sa.Column("realized_schema_key", sa.String(length=128), nullable=False),
sa.Column("record_count", sa.Integer(), nullable=True),
sa.Column("created_by_node_key", sa.String(length=128), nullable=True),
sa.Column("deleted", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"_snapflow_data_function_log",
sa.Column("env_id", sa.String(length=64), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("node_key", sa.String(length=128), nullable=False),
sa.Column("node_start_state", sa.JSON(), nullable=True),
sa.Column("node_end_state", sa.JSON(), nullable=True),
sa.Column("function_key", sa.String(length=128), nullable=False),
sa.Column("function_params", sa.JSON(), nullable=True),
sa.Column("runtime_url", sa.String(length=128), nullable=True),
sa.Column("queued_at", sa.DateTime(), nullable=True),
sa.Column("started_at", sa.DateTime(), nullable=True),
sa.Column("completed_at", sa.DateTime(), nullable=True),
sa.Column("error", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"_snapflow_generated_schema",
sa.Column("env_id", sa.String(length=64), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("key", sa.String(length=128), nullable=False),
sa.Column("definition", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("key"),
)
op.create_table(
"_snapflow_node_state",
sa.Column("env_id", sa.String(length=64), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("node_key", sa.String(length=128), nullable=True),
sa.Column("state", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("env_id", "node_key"),
)
op.create_table(
"_snapflow_data_block_log",
sa.Column("env_id", sa.String(length=64), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("function_log_id", sa.Integer(), nullable=False),
sa.Column("data_block_id", sa.String(length=128), nullable=False),
sa.Column("stream_name", sa.String(length=128), nullable=True),
sa.Column(
"direction",
sa.Enum("INPUT", "OUTPUT", name="direction", native_enum=False),
nullable=False,
),
sa.Column("processed_at", sa.DateTime(), nullable=False),
sa.Column("invalidated", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["data_block_id"],
["_snapflow_data_block_metadata.id"],
),
sa.ForeignKeyConstraint(
["function_log_id"],
["_snapflow_data_function_log.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"_snapflow_stored_data_block_metadata",
sa.Column("env_id", sa.String(length=64), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.String(length=128), nullable=False),
sa.Column("name", sa.String(length=128), nullable=True),
sa.Column("data_block_id", sa.String(length=128), nullable=False),
sa.Column("storage_url", sa.String(length=128), nullable=False),
sa.Column(
"data_format",
snapflow.core.metadata.orm.DataFormatType(length=128),
nullable=False,
),
sa.ForeignKeyConstraint(
["data_block_id"],
["_snapflow_data_block_metadata.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"_snapflow_alias",
sa.Column("env_id", sa.String(length=64), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("name", sa.String(length=128), nullable=True),
sa.Column("data_block_id", sa.String(length=128), nullable=False),
sa.Column("stored_data_block_id", sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(
["data_block_id"],
["_snapflow_data_block_metadata.id"],
),
sa.ForeignKeyConstraint(
["stored_data_block_id"],
["_snapflow_stored_data_block_metadata.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("env_id", "name"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("_snapflow_alias")
op.drop_table("_snapflow_stored_data_block_metadata")
op.drop_table("_snapflow_data_block_log")
op.drop_table("_snapflow_node_state")
op.drop_table("_snapflow_generated_schema")
op.drop_table("_snapflow_data_function_log")
op.drop_table("_snapflow_data_block_metadata")
# ### end Alembic commands ###
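# Usage note (not part of the migration itself): with a standard Alembic setup,
# this revision would typically be applied and rolled back via the CLI, e.g.:
#
#   alembic upgrade 7d5638b5d74d   # or `alembic upgrade head`
#   alembic downgrade base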
|
[
"kenvanharen@gmail.com"
] |
kenvanharen@gmail.com
|
beec8483553b7ce3ead4a12e4517422e9089ff59
|
27c7664cc3aa3c12d9f96ce30ac1d1e6c0c52e0b
|
/test.py
|
9ea86b9c45266044bae2e87db4d93f206f2c21ff
|
[] |
no_license
|
RaindragonD/transporternet-torch
|
3d3548f54ee8a902e6b4785afa5448376e1e558a
|
7bb2075771d458d58bc49ebd9b4bd42675fb6ba7
|
refs/heads/master
| 2023-03-10T19:43:22.572230
| 2021-02-27T10:00:14
| 2021-02-27T10:00:14
| 342,817,090
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,800
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Ravens main training script."""
import argparse
import os
import pickle
import numpy as np
from ravens import agents
from ravens import Dataset
from ravens import Environment
from ravens import tasks
import torch
def main():
# Parse command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--disp', action='store_true')
parser.add_argument('--task', default='block-insertion')
parser.add_argument('--agent', default='transporter')
parser.add_argument('--n_demos', default=100, type=int)
parser.add_argument('--n_steps', default=40000, type=int)
parser.add_argument('--n_runs', default=1, type=int)
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--gpu_limit', default=None, type=int)
args = parser.parse_args()
    # Select the compute device (--gpu is parsed but currently unused; CUDA is chosen automatically when available).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")
    # TODO: Configure how much GPU memory to use (in gigabytes).
# if args.gpu_limit is not None:
# mem_limit = 1024 * args.gpu_limit
# dev_cfg = [cfg.VirtualDeviceConfiguration(memory_limit=mem_limit)]
# cfg.set_virtual_device_configuration(gpus[0], dev_cfg)
# Initialize environment and task.
env = Environment(args.disp, hz=480)
task = tasks.names[args.task]()
task.mode = 'test'
# Load test dataset.
dataset = Dataset(os.path.join('data', f'{args.task}-test'))
# Run testing for each training run.
for train_run in range(args.n_runs):
name = f'{args.task}-{args.agent}-{args.n_demos}-{train_run}'
# Initialize agent.
np.random.seed(train_run)
torch.manual_seed(train_run)
agent = agents.names[args.agent](name, args.task, device)
# # Run testing every interval.
# for train_step in range(0, args.n_steps + 1, args.interval):
# Load trained agent.
if args.n_steps > 0:
agent.load(args.n_steps, device)
# Run testing and save total rewards with last transition info.
results = []
all_actions = []
# all_actions = pickle.load(open('test-actions.pkl', 'rb'))
for i in range(dataset.n_episodes):
print(f'Test: {i + 1}/{dataset.n_episodes}')
actions = []
# actions = all_actions[i]
episode, seed = dataset.load(i)
goal = episode[-1]
total_reward = 0
np.random.seed(seed)
obs, reward, _, info = env.reset(task)
for j in range(task.max_steps):
act = agent.act(obs, info, goal)
actions.append(act)
obs, reward, done, info = env.step(act)
total_reward += reward
print(f'{done} {total_reward}')
if done:
break
results.append((total_reward, info))
all_actions.append(actions)
# Save results.
# pickle.dump(results, open(f'{name}-{args.n_steps}.pkl', 'wb'))
pickle.dump(all_actions, open('test-actions.pkl', 'wb'))
if __name__ == '__main__':
main()
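# Example invocation (illustrative; the dataset under data/ and the agent
# checkpoints must already exist):
#
#   python test.py --task block-insertion --agent transporter --n_demos 100 --n_steps 40000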
|
[
"xlfdlyl@sina.com"
] |
xlfdlyl@sina.com
|
0d292aca4a1380aea123d4d98c8f821b69bdba90
|
68d3ae6defc49ae4aa970b1b275df1590ef0588b
|
/fix_ngrok.py
|
064c9a0984efb326761173719113e148ca3bda39
|
[] |
no_license
|
f1gur4nt1/fix_ngrok
|
16f1f64c03b077f3879166b633faa568c978d73f
|
b796791b0b86f9c9f6a40e2ba9830a556fa74af8
|
refs/heads/master
| 2021-01-01T21:28:28.042477
| 2020-02-09T18:06:10
| 2020-02-09T18:06:10
| 239,347,326
| 2
| 3
| null | 2020-06-27T17:45:50
| 2020-02-09T17:48:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
from requests import get
import os
import re
def get_https_proxys():
global https_proxys
global head
head = {"User-Agent":"Mozilla/5.0 (Linux; U; Android 4.4; en-us; Nexus 4 Build/JOP24G) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30"}
res = get(url="https://www.proxy-list.download/api/v1/get?type=https",headers=head)
html = res.text
https_proxys = re.findall(r"[\w\.?\:?]+",html)
def check_proxy():
    global http_proxy
    http_proxy = None
    print("FIXING NGROK ...")
    for prox in https_proxys:
        https_proxy = {"https":"https://"+prox}
        print(https_proxy)
        try:
            res = get(url="https://www.google.com/",headers=head,proxies=https_proxy,timeout=3)
            if res.status_code == 200:
                try:
                    candidate = {"http":"http://"+prox}
                    print(candidate)
                    res = get(url="http://spys.one/",headers=head,proxies=candidate,timeout=3)
                    if res.status_code == 200:
                        print(res.status_code)
                        http_proxy = candidate["http"]
                        break
                except Exception:
                    pass
        except Exception:
            pass
get_https_proxys()
check_proxy()
if http_proxy:
    os.system("echo 'export http_proxy=\"{}\"' >> $HOME/.bashrc".format(http_proxy))
    print("[+] NGROK FIXED SUCCESSFULLY!\n")
    print("[*] TYPE 'bash' FOR IT TO WORK IN THIS TERMINAL!")
else:
    print("[-] NO WORKING PROXY FOUND, NOTHING WAS WRITTEN TO .bashrc")
|
[
"noreply@github.com"
] |
noreply@github.com
|
3bdcba739946895a679cdd396dc0de58e78a9384
|
7821b4467feba1f186bd2743fda73280cfd57ba3
|
/ADDA/adda_ori.py
|
02d403916e3c793b182152bcfb96c1795c50706e
|
[
"MIT"
] |
permissive
|
comprehensiveMap/EI328-project
|
be50620ddd57d99326f4129876a05cd980b91cc0
|
145495454487c7a4496751ce3621873f6cd80872
|
refs/heads/master
| 2022-08-04T05:33:29.813283
| 2020-05-28T14:06:34
| 2020-05-28T14:06:34
| 263,349,832
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,486
|
py
|
"""
Implements ADDA:
Adversarial Discriminative Domain Adaptation, Tzeng et al. (2017)
"""
import argparse
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from train_source import preprocess_train, preprocess_test, preprocess_train_single
import config
from models import Net
from utils import loop_iterable, set_requires_grad, GrayscaleToRgb
torch.manual_seed(1919810)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
EXTRACTED_FEATURE_DIM = 128
def test(args, model):
X_target, y_target = preprocess_test(args.person)
target_dataset = torch.utils.data.TensorDataset(X_target, y_target)
target_loader = DataLoader(target_dataset, batch_size=args.batch_size,
num_workers=1, pin_memory=True)
model.eval()
total_accuracy = 0
with torch.no_grad():
for x, y_true in tqdm(target_loader, leave=False):
x, y_true = x.to(device), y_true.to(device)
y_pred = model(x)
total_accuracy += (y_pred.max(1)[1] == y_true).float().mean().item()
mean_accuracy = total_accuracy / len(target_loader)
#print(f'Accuracy on target data: {mean_accuracy:.4f}')
return mean_accuracy
def gen_pred(args, model):
X_target, y_target = preprocess_test(args.person)
target_dataset = torch.utils.data.TensorDataset(X_target, y_target)
target_loader = DataLoader(target_dataset, batch_size=args.batch_size,
num_workers=1, pin_memory=True)
model.eval()
preds = []
with torch.no_grad():
for x, y_true in tqdm(target_loader, leave=False):
x, y_true = x.to(device), y_true.to(device)
y_pred = model(x).tolist()
preds.append(y_pred)
return preds
def main(args):
source_model = Net().to(device)
source_model.load_state_dict(torch.load(args.MODEL_FILE))
source_model.eval()
set_requires_grad(source_model, requires_grad=False)
clf = source_model
source_model = source_model.feature_extractor
target_model = Net().to(device)
target_model.load_state_dict(torch.load(args.MODEL_FILE))
target_model = target_model.feature_extractor
discriminator = nn.Sequential(
nn.Linear(EXTRACTED_FEATURE_DIM, 64),
nn.ReLU(),
nn.BatchNorm1d(64),
nn.Linear(64, 1),
nn.Sigmoid()
).to(device)
#half_batch = args.batch_size // 2
batch_size = args.batch_size
# X_source, y_source = preprocess_train()
X_source, y_source = preprocess_train_single(1)
source_dataset = torch.utils.data.TensorDataset(X_source, y_source)
source_loader = DataLoader(source_dataset, batch_size=batch_size,
shuffle=False, num_workers=1, pin_memory=True)
X_target, y_target = preprocess_test(args.person)
target_dataset = torch.utils.data.TensorDataset(X_target, y_target)
target_loader = DataLoader(target_dataset, batch_size=batch_size,
shuffle=False, num_workers=1, pin_memory=True)
discriminator_optim = torch.optim.Adam(discriminator.parameters())
target_optim = torch.optim.Adam(target_model.parameters(), lr=3e-6)
    criterion = nn.BCELoss()  # the discriminator already ends in Sigmoid, so plain BCE matches; BCEWithLogitsLoss would apply sigmoid twice
best_tar_acc = test(args, clf)
for epoch in range(1, args.epochs+1):
source_loader = DataLoader(source_loader.dataset, batch_size=batch_size, shuffle=True)
target_loader = DataLoader(target_loader.dataset, batch_size=batch_size, shuffle=True)
batch_iterator = zip(loop_iterable(source_loader), loop_iterable(target_loader))
total_loss = 0
adv_loss = 0
total_accuracy = 0
second_acc = 0
for _ in trange(args.iterations, leave=False):
# Train discriminator
set_requires_grad(target_model, requires_grad=False)
set_requires_grad(discriminator, requires_grad=True)
discriminator.train()
for _ in range(args.k_disc):
(source_x, _), (target_x, _) = next(batch_iterator)
source_x, target_x = source_x.to(device), target_x.to(device)
source_features = source_model(source_x).view(source_x.shape[0], -1)
target_features = target_model(target_x).view(target_x.shape[0], -1)
discriminator_x = torch.cat([source_features, target_features])
discriminator_y = torch.cat([torch.ones(source_x.shape[0], device=device),
torch.zeros(target_x.shape[0], device=device)])
preds = discriminator(discriminator_x).squeeze()
loss = criterion(preds, discriminator_y)
discriminator_optim.zero_grad()
loss.backward()
discriminator_optim.step()
total_loss += loss.item()
total_accuracy += ((preds >= 0.5).long() == discriminator_y.long()).float().mean().item()
# Train classifier
set_requires_grad(target_model, requires_grad=True)
set_requires_grad(discriminator, requires_grad=False)
target_model.train()
for _ in range(args.k_clf):
_, (target_x, _) = next(batch_iterator)
target_x = target_x.to(device)
target_features = target_model(target_x).view(target_x.shape[0], -1)
# flipped labels
discriminator_y = torch.ones(target_x.shape[0], device=device)
preds = discriminator(target_features).squeeze()
second_acc += ((preds >= 0.5).long() == discriminator_y.long()).float().mean().item()
loss = criterion(preds, discriminator_y)
adv_loss += loss.item()
target_optim.zero_grad()
loss.backward()
target_optim.step()
mean_loss = total_loss / (args.iterations*args.k_disc)
mean_adv_loss = adv_loss / (args.iterations * args.k_clf)
dis_accuracy = total_accuracy / (args.iterations*args.k_disc)
sec_acc = second_acc / (args.iterations * args.k_clf)
clf.feature_extractor = target_model
        tar_accuracy = test(args, clf)
        if tar_accuracy > best_tar_acc:
            best_tar_acc = tar_accuracy
            torch.save(clf.state_dict(), 'trained_models/adda.pt')
        tqdm.write(f'EPOCH {epoch:03d}: discriminator_loss={mean_loss:.4f}, adv_loss = {mean_adv_loss:.4f}, '
                   f'discriminator_accuracy={dis_accuracy:.4f}, tar_accuracy = {tar_accuracy:.4f}, best_accuracy = {best_tar_acc:.4f}, sec_acc = {sec_acc:.4f}')
# Create the full target model and save it
clf.feature_extractor = target_model
#torch.save(clf.state_dict(), 'trained_models/adda.pt')
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description='Domain adaptation using ADDA')
arg_parser.add_argument('MODEL_FILE', help='A model in trained_models')
arg_parser.add_argument('--batch-size', type=int, default=128)
arg_parser.add_argument('--iterations', type=int, default=50)
arg_parser.add_argument('--epochs', type=int, default=30)
arg_parser.add_argument('--k-disc', type=int, default=1)
arg_parser.add_argument('--k-clf', type=int, default=3)
arg_parser.add_argument('--person', type=int, default=3)
args = arg_parser.parse_args()
main(args)
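# Example invocation (illustrative; MODEL_FILE must be a checkpoint produced
# by the source-training script, path hypothetical):
#
#   python adda_ori.py trained_models/source.pt --person 3 --epochs 30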
|
[
"1175201255@qq.com"
] |
1175201255@qq.com
|
ceb03e78d5da369eaa15cfadb539d06f2ad3979b
|
c3e2f56672e01590dc7dc7e184f30c2884ce5d3a
|
/Programs/MyPythonXII/Unit1/PyChap03/summatrix.py
|
a00722d07d1ce3b80baa6858b737eb411c6ad68e
|
[] |
no_license
|
mridulrb/Basic-Python-Examples-for-Beginners
|
ef47e830f3cc21cee203de2a7720c7b34690e3e1
|
86b0c488de4b23b34f7424f25097afe1874222bd
|
refs/heads/main
| 2023-01-04T09:38:35.444130
| 2020-10-18T15:59:29
| 2020-10-18T15:59:29
| 305,129,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,941
|
py
|
# File name: ...\\MyPythonXII\Unit1\PyChap03\summatrix.py
# Program to add two matrices
# Declaration of three 10 x 10 matrices (so row/column counts up to 10 are supported)
A = [[0 for _ in range(10)] for _ in range(10)]
B = [[0 for _ in range(10)] for _ in range(10)]
C = [[0 for _ in range(10)] for _ in range(10)]
print("Enter the number of Rows of matrix A: ", end='')
r = int(input())
print("Enter the number of Columns of matrix A: ", end='')
c = int(input())
print("Enter the number of Rows of matrix B: ", end='')
r1 = int(input())
print("Enter the number of Columns of matrix B: ", end='')
c1 = int(input())
# Before accepting the elements, check whether the numbers of
# rows and columns of both matrices are equal
if (r == r1 and c == c1):
# Accept the Elements for matrix A
for i in range(r):
for j in range(c):
print("Enter the element A[%d][%d]: " % (i, j), end='')
A[i][j] = int(input())
# Accept the Elements for matrix B
for i in range(r):
for j in range(c):
print("Enter the element B[%d][%d]: " % (i, j), end='')
B[i][j] = int(input())
# Addition of two matrices
for i in range(r):
for j in range(c):
C[i][j] = A[i][j] + B[i][j]
# First matrix
print("Matrix A:")
for i in range(r):
print(" "*5, end="")
for j in range(c):
print("{0:^3}".format(A[i][j]), end=' ')
print()
print("Matrix B:")
for i in range(r):
print(" "*5, end="")
for j in range(c):
print("{0:^3}".format(B[i][j]), end=' ')
print()
# Print out the Resultant Matrix C
print("The Addition of two Matrices C is : ")
for i in range(r):
print(" "*5, end="")
for j in range(c):
print ("{0:^3}".format(C[i][j]), end=' ')
print()
else:
    print("The order of the two matrices is not the same")
|
[
"mridurb@gmail.com"
] |
mridurb@gmail.com
|
edee2cdbddbabbec63d798c0230a81c823d0ffae
|
9e155c86d1cfda1fa5fd115f1fd5681287836d39
|
/script_v1/word_generator.py
|
a48020f1268f4fc5c43e57a261d676d0138dc25a
|
[] |
no_license
|
hiropppe/ELSA
|
59370626045ae28cd849edaef0d09cff67acfbb2
|
1ece189564e3827a4a7a44cd5f87e0ae4fca5c88
|
refs/heads/master
| 2020-05-27T22:04:53.007013
| 2019-11-14T04:48:28
| 2019-11-14T04:48:28
| 188,803,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,317
|
py
|
''' Extracts lists of words from a given input to be used for later vocabulary
generation or for creating tokenized datasets.
Supports functionality for handling different file types and
filtering/processing of this input.
'''
from __future__ import division, print_function
import MeCab
import os
import unicodedata
import neologdn
import numpy as np
import re
import stanfordnlp
from multiprocessing import Pool
from text_unidecode import unidecode
from tokens import RE_MENTION
from filter_utils import (
convert_linebreaks,
convert_nonbreaking_space,
correct_length,
extract_emojis,
mostly_english,
non_english_user,
process_word,
punct_word,
remove_control_chars,
remove_variation_selectors,
separate_emojis_and_text)
from functools import partial
from nltk.tokenize.casual import TweetTokenizer
from tqdm import tqdm
SPACE_RE = re.compile(r'[\s\u3000]+')
# Only catch retweets in the beginning of the tweet as those are the
# automatically added ones.
# We do not want to remove tweets like "Omg.. please RT this!!"
RETWEET_RE = re.compile(r'^[rR][tT]')
# Use fast and less precise regex for removing tweets with URLs
# It doesn't matter too much if a few tweets with URL's make it through
URLS_RE = re.compile(r'https?://|www\.')
MENTION_RE = re.compile(RE_MENTION)
ALLOWED_CONVERTED_UNICODE_PUNCTUATION = """!"#$'()+,-.:;<=>?@`~"""
class StanfordTokenizer():
def __init__(self, lang, model_dir, processors='tokenize,mwt,pos,lemma'):
self.nlp = stanfordnlp.Pipeline(processors=processors,
models_dir=model_dir,
lang=lang)
def tokenize(self, text, lemma=False, lower=True):
def word_text(word):
text = word.lemma if lemma else word.text
if lower:
text = text.lower()
return text
return [word_text(word) for sent in self.nlp(text).sentences for word in sent.words]
class MeCabTokenizer():
def __init__(self, stem=False, neologd=False, neologdn=False):
option = ""
if stem:
option += "-F\s%f[6] -U\s%m -E\\n"
else:
option += "-Owakati"
if neologd:
option += " -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd"
self.tagger = MeCab.Tagger(option)
if neologdn:
self.neologdn = True
else:
self.neologdn = False
def tokenize(self, text):
if self.neologdn:
text = neologdn.normalize(text)
return self.tagger.parse(text).split()
def get_default_tokenizer(lang, model_dir='/data/stanfordnlp_resources'):
if lang == 'ja':
return MeCabTokenizer()
elif lang in ('ar', 'zh'):
return StanfordTokenizer(lang, model_dir, processors="tokenize,mwt")
else:
return TweetTokenizer(preserve_case=True, reduce_len=True, strip_handles=True)
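# A quick sketch of how the dispatcher above behaves (MeCab/StanfordNLP model
# availability assumed):
#
#   get_default_tokenizer('ja')   # -> MeCabTokenizer
#   get_default_tokenizer('zh')   # -> StanfordTokenizer with processors='tokenize,mwt'
#   get_default_tokenizer('en')   # -> nltk TweetTokenizer (preserve_case, reduce_len, strip_handles)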
class WordGenerator():
''' Cleanses input and converts into words. Needs all sentences to be in
Unicode format. Has subclasses that read sentences differently based on
file type.
Takes a generator as input. This can be from e.g. a file.
    unicode_handling in ['ignore_sentence', 'convert_punctuation', 'allow']
    emoji_handling in ['ignore_emoji', 'ignore_sentence', 'allow']
'''
def __init__(self,
file_path,
lang,
norm_unicode_text=False,
allow_unicode_text=False,
ignore_emojis=True,
remove_variation_selectors=True,
break_replacement=True,
processes=1,
chunksize=100):
self.file_path = file_path
self.lang = lang
self.tokenizer = None
self.norm_unicode_text = norm_unicode_text
self.allow_unicode_text = allow_unicode_text
self.remove_variation_selectors = remove_variation_selectors
self.ignore_emojis = ignore_emojis
self.break_replacement = break_replacement
self.processes = processes
self.chunksize = chunksize
self.reset_stats()
def get_words(self, sentence):
""" Tokenizes a sentence into individual words.
Converts Unicode punctuation into ASCII if that option is set.
Ignores sentences with Unicode if that option is set.
Returns an empty list of words if the sentence has Unicode and
that is not allowed.
"""
sentence = sentence.strip().lower()
if self.break_replacement:
sentence = convert_linebreaks(sentence)
if self.remove_variation_selectors:
sentence = remove_variation_selectors(sentence)
if self.tokenizer is None:
self.tokenizer = get_default_tokenizer(self.lang)
words = self.tokenizer.tokenize(sentence)
if self.norm_unicode_text:
converted_words = []
for w in words:
accept_sentence, c_w = self.convert_unicode_word(w)
# Unicode word detected and not allowed
if not accept_sentence:
return []
else:
converted_words.append(c_w)
words = converted_words
words = [process_word(w) for w in words]
return words
def check_ascii(self, word):
""" Returns whether a word is ASCII """
try:
word.encode('ascii')
return True
except (UnicodeDecodeError, UnicodeEncodeError):
return False
def convert_unicode_punctuation(self, word):
word_converted_punct = []
for c in word:
decoded_c = unidecode(c).lower()
if len(decoded_c) == 0:
# Cannot decode to anything reasonable
word_converted_punct.append(c)
else:
# Check if all punctuation and therefore fine
# to include unidecoded version
allowed_punct = punct_word(
decoded_c,
punctuation=ALLOWED_CONVERTED_UNICODE_PUNCTUATION)
if allowed_punct:
word_converted_punct.append(decoded_c)
else:
word_converted_punct.append(c)
return ''.join(word_converted_punct)
def convert_unicode_word(self, word):
""" Converts Unicode words to ASCII using unidecode. If Unicode is not
allowed (set as a variable during initialization), then only
punctuation that can be converted to ASCII will be allowed.
"""
if self.check_ascii(word):
return True, word
# First we ensure that the Unicode is normalized so it's
# always a single character.
word = unicodedata.normalize("NFKC", word)
# Convert Unicode punctuation to ASCII equivalent. We want
# e.g. u"\u203c" (double exclamation mark) to be treated the same
# as u"!!" no matter if we allow other Unicode characters or not.
word = self.convert_unicode_punctuation(word)
if self.ignore_emojis:
_, word = separate_emojis_and_text(word)
# If conversion of punctuation and removal of emojis took care
# of all the Unicode or if we allow Unicode then everything is fine
if self.check_ascii(word) or self.allow_unicode_text:
return True, word
else:
# Sometimes we might want to simply ignore Unicode sentences
# (e.g. for vocabulary creation). This is another way to prevent
# "polution" of strange Unicode tokens from low quality datasets
return False, ''
def data_preprocess_filtering(self, line, iter_i):
""" To be overridden with specific preprocessing/filtering behavior
if desired.
Returns a boolean of whether the line should be accepted and the
preprocessed text.
Runs prior to tokenization.
"""
return True, line, {}
def data_postprocess_filtering(self, words, iter_i):
""" To be overridden with specific postprocessing/filtering behavior
if desired.
Returns a boolean of whether the line should be accepted and the
postprocessed text.
Runs after tokenization.
"""
return True, words, {}
def extract_valid_sentence_words(self, line):
""" Line may either a string of a list of strings depending on how
the stream is being parsed.
Domain-specific processing and filtering can be done both prior to
and after tokenization.
Custom information about the line can be extracted during the
processing phases and returned as a dict.
"""
info = {}
pre_valid, pre_line, pre_info = \
self.data_preprocess_filtering(line, self.stats['total'])
info.update(pre_info)
if not pre_valid:
self.stats['pretokenization_filtered'] += 1
return False, [], info
words = self.get_words(pre_line)
if len(words) == 0:
self.stats['unicode_filtered'] += 1
return False, [], info
post_valid, post_words, post_info = \
self.data_postprocess_filtering(words, self.stats['total'])
info.update(post_info)
if not post_valid:
self.stats['posttokenization_filtered'] += 1
return post_valid, post_words, info
def generate_array_from_input(self):
sentences = []
for words in self:
sentences.append(words)
return sentences
def reset_stats(self):
self.stats = {'pretokenization_filtered': 0,
'unicode_filtered': 0,
'posttokenization_filtered': 0,
'total': 0,
'valid': 0}
def __iter__(self):
if self.file_path is None:
raise ValueError("Stream should be set before iterating over it!")
if self.processes > 1:
pool = Pool(self.processes)
map_func = partial(pool.imap_unordered, chunksize=self.chunksize)
else:
pool = None
map_func = map
try:
with open(self.file_path) as stream:
for (valid, words, info) in tqdm(map_func(self.extract_valid_sentence_words, stream)):
# print("rnmb", words)
# Words may be filtered away due to unidecode etc.
# In that case the words should not be passed on.
if valid and len(words):
self.stats['valid'] += 1
yield words, info
self.stats['total'] += 1
# print("cnmb", words, "\n")
finally:
if pool is not None:
pool.close()
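# Illustrative usage (a sketch; 'corpus.txt' is a hypothetical file with one
# sentence per line):
#
#   wg = WordGenerator('corpus.txt', lang='en')
#   for words, info in wg:
#       print(words)
#   print(wg.stats)  # {'valid': ..., 'total': ..., ...}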
class TweetWordGenerator(WordGenerator):
''' Returns np array or generator of ASCII sentences for given tweet input.
Any file opening/closing should be handled outside of this class.
'''
def __init__(self,
file_path,
lang,
wanted_emojis=None,
english_words=None,
norm_unicode_text=True,
allow_unicode_text=True,
ignore_retweets=True,
ignore_url_tweets=True,
ignore_mention_tweets=False,
processes=1,
chunksize=100):
self.wanted_emojis = wanted_emojis
self.english_words = english_words
self.ignore_retweets = ignore_retweets
self.ignore_url_tweets = ignore_url_tweets
self.ignore_mention_tweets = ignore_mention_tweets
WordGenerator.__init__(self, file_path, lang, ignore_emojis=False,
norm_unicode_text=norm_unicode_text,
allow_unicode_text=allow_unicode_text,
processes=processes, chunksize=chunksize)
def validated_tweet(self, text):
''' A bunch of checks to determine whether the tweet is valid.
Also returns emojis contained by the tweet.
'''
# Ordering of validations is important for speed
# If it passes all checks, then the tweet is validated for usage
if self.ignore_retweets and RETWEET_RE.search(text):
return False, []
if self.ignore_url_tweets and URLS_RE.search(text):
return False, []
if self.ignore_mention_tweets and MENTION_RE.search(text):
return False, []
if self.wanted_emojis is not None:
uniq_emojis = np.unique(extract_emojis(text, self.wanted_emojis))
if len(uniq_emojis) == 0:
return False, []
else:
uniq_emojis = []
return True, uniq_emojis
def data_preprocess_filtering(self, line, iter_i):
text = line.strip()
valid, emojis = self.validated_tweet(text)
if valid:
text = MENTION_RE.sub('', text)
text = RETWEET_RE.sub('', text)
text = URLS_RE.sub('', text)
text = SPACE_RE.sub(' ', text)
text = text.replace('&', '&')
text = text.strip()
else:
text = ''
return valid, text, {'emojis': emojis}
    def data_postprocess_filtering(self, words, iter_i):
        valid_length = correct_length(words, 1, None)
        valid_english, n_words, n_english = mostly_english(words,
                                                           self.english_words)
        info = {'length': len(words),
                'n_normal_words': n_words,
                'n_english': n_english}
        # NOTE: the length/English checks are computed for the stats but not
        # enforced, so every tokenized tweet is accepted; restore the
        # conditional below to reject short or mostly non-English tweets.
        # if not (valid_length and valid_english):
        #     return False, [], info
        return True, words, info
|
[
"sergei.0910@gmail.com"
] |
sergei.0910@gmail.com
|
81c5ad86ce20a3e500ef6da7b3985ee4ef304f2d
|
b252583764a77b683e4e6586c0bfcac3fa1376a8
|
/weibo.py
|
7ed76a3a591f75c57a1f1f1ed31acb87b3e90c4f
|
[] |
no_license
|
PaulWongDlut/weibo-crawler
|
0550e278b2769045699c2b16dd683f2a5e6c7bd7
|
86cb76858cae4f0e3fca58de2ed998a2436560bd
|
refs/heads/master
| 2020-12-13T07:45:53.658402
| 2020-01-08T17:34:00
| 2020-01-08T17:34:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,609
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import codecs
import copy
import csv
import json
import math
import os
import random
import sys
import traceback
from collections import OrderedDict
from datetime import date, datetime, timedelta
from time import sleep
import requests
from lxml import etree
from requests.adapters import HTTPAdapter
from tqdm import tqdm
class Weibo(object):
def __init__(self, config):
"""Weibo类初始化"""
self.validate_config(config)
        self.filter = config[
            'filter']  # 0 or 1, default 0: 0 crawls all of the user's weibos, 1 crawls only original ones
since_date = str(config['since_date'])
if since_date.isdigit():
since_date = str(date.today() - timedelta(int(since_date)))
        self.since_date = since_date  # start date: weibos published from this date to now are crawled, format yyyy-mm-dd
        self.write_mode = config[
            'write_mode']  # output formats, a list that may contain csv, json, mongo and mysql
        self.original_pic_download = config[
            'original_pic_download']  # 0 or 1: 1 downloads images of original weibos
        self.retweet_pic_download = config[
            'retweet_pic_download']  # 0 or 1: 1 downloads images of retweeted weibos
        self.original_video_download = config[
            'original_video_download']  # 0 or 1: 1 downloads videos of original weibos
        self.retweet_video_download = config[
            'retweet_video_download']  # 0 or 1: 1 downloads videos of retweeted weibos
        self.cookie = {'Cookie': config.get('cookie')}  # weibo cookie, optional
        self.mysql_config = config['mysql_config']  # MySQL connection config, optional
user_id_list = config['user_id_list']
if not isinstance(user_id_list, list):
if not os.path.isabs(user_id_list):
user_id_list = os.path.split(
os.path.realpath(__file__))[0] + os.sep + user_id_list
user_id_list = self.get_user_list(user_id_list)
        self.user_id_list = user_id_list  # list of user_ids of the weibo users to crawl
        self.user_id = ''  # user id, e.g. the id of the user nicknamed "Dear-迪丽热巴" is '1669879400'
        self.user = {}  # stores the target weibo user's info
        self.got_count = 0  # number of weibos fetched
        self.weibo = []  # stores all fetched weibo info
        self.weibo_id_list = []  # stores the ids of all fetched weibos
def validate_config(self, config):
"""验证配置是否正确"""
        # validate filter, original_pic_download, retweet_pic_download, original_video_download, retweet_video_download
        argument_list = [
            'filter', 'original_pic_download', 'retweet_pic_download',
            'original_video_download', 'retweet_video_download'
        ]
        for argument in argument_list:
            if config[argument] != 0 and config[argument] != 1:
                sys.exit(u'%s值应为0或1,请重新输入' % argument)
        # validate since_date
since_date = str(config['since_date'])
if (not self.is_date(since_date)) and (not since_date.isdigit()):
sys.exit(u'since_date值应为yyyy-mm-dd形式或整数,请重新输入')
        # validate write_mode
write_mode = ['csv', 'json', 'mongo', 'mysql']
if not isinstance(config['write_mode'], list):
sys.exit(u'write_mode值应为list类型')
for mode in config['write_mode']:
if mode not in write_mode:
sys.exit(
u'%s为无效模式,请从csv、json、mongo和mysql中挑选一个或多个作为write_mode' %
mode)
        # validate user_id_list
user_id_list = config['user_id_list']
if (not isinstance(user_id_list,
list)) and (not user_id_list.endswith('.txt')):
sys.exit(u'user_id_list值应为list类型或txt文件路径')
if not isinstance(user_id_list, list):
if not os.path.isabs(user_id_list):
user_id_list = os.path.split(
os.path.realpath(__file__))[0] + os.sep + user_id_list
if not os.path.isfile(user_id_list):
sys.exit(u'不存在%s文件' % user_id_list)
def is_date(self, since_date):
"""判断日期格式是否正确"""
try:
datetime.strptime(since_date, "%Y-%m-%d")
return True
except ValueError:
return False
def get_json(self, params):
"""获取网页中json数据"""
url = 'https://m.weibo.cn/api/container/getIndex?'
r = requests.get(url, params=params, cookies=self.cookie)
return r.json()
def get_weibo_json(self, page):
"""获取网页中微博json数据"""
params = {'containerid': '107603' + str(self.user_id), 'page': page}
js = self.get_json(params)
return js
def user_to_mongodb(self):
"""将爬取的用户信息写入MongoDB数据库"""
user_list = [self.user]
self.info_to_mongodb('user', user_list)
print(u'%s信息写入MongoDB数据库完毕' % self.user['screen_name'])
def user_to_mysql(self):
"""将爬取的用户信息写入MySQL数据库"""
mysql_config = {
'host': 'localhost',
'port': 3306,
'user': 'root',
'password': '123456',
'charset': 'utf8mb4'
}
        # create the 'weibo' database
create_database = """CREATE DATABASE IF NOT EXISTS weibo DEFAULT
CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci"""
self.mysql_create_database(mysql_config, create_database)
        # create the 'user' table
create_table = """
CREATE TABLE IF NOT EXISTS user (
id varchar(20) NOT NULL,
screen_name varchar(30),
gender varchar(10),
statuses_count INT,
followers_count INT,
follow_count INT,
description varchar(140),
profile_url varchar(200),
profile_image_url varchar(200),
avatar_hd varchar(200),
urank INT,
mbrank INT,
verified BOOLEAN DEFAULT 0,
verified_type INT,
verified_reason varchar(140),
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"""
self.mysql_create_table(mysql_config, create_table)
self.mysql_insert(mysql_config, 'user', [self.user])
print(u'%s信息写入MySQL数据库完毕' % self.user['screen_name'])
def user_to_database(self):
"""将用户信息写入数据库"""
if 'mysql' in self.write_mode:
self.user_to_mysql()
if 'mongo' in self.write_mode:
self.user_to_mongodb()
def get_user_info(self):
"""获取用户信息"""
params = {'containerid': '100505' + str(self.user_id)}
js = self.get_json(params)
if js['ok']:
info = js['data']['userInfo']
user_info = {}
user_info['id'] = self.user_id
user_info['screen_name'] = info.get('screen_name', '')
user_info['gender'] = info.get('gender', '')
user_info['statuses_count'] = info.get('statuses_count', 0)
user_info['followers_count'] = info.get('followers_count', 0)
user_info['follow_count'] = info.get('follow_count', 0)
user_info['description'] = info.get('description', '')
user_info['profile_url'] = info.get('profile_url', '')
user_info['profile_image_url'] = info.get('profile_image_url', '')
user_info['avatar_hd'] = info.get('avatar_hd', '')
user_info['urank'] = info.get('urank', 0)
user_info['mbrank'] = info.get('mbrank', 0)
user_info['verified'] = info.get('verified', False)
user_info['verified_type'] = info.get('verified_type', 0)
user_info['verified_reason'] = info.get('verified_reason', '')
user = self.standardize_info(user_info)
self.user = user
self.user_to_database()
return user
def get_long_weibo(self, id):
"""获取长微博"""
url = 'https://m.weibo.cn/detail/%s' % id
html = requests.get(url, cookies=self.cookie).text
html = html[html.find('"status":'):]
html = html[:html.rfind('"hotScheme"')]
html = html[:html.rfind(',')]
html = '{' + html + '}'
js = json.loads(html, strict=False)
weibo_info = js.get('status')
if weibo_info:
weibo = self.parse_weibo(weibo_info)
return weibo
def get_pics(self, weibo_info):
"""获取微博原始图片url"""
if weibo_info.get('pics'):
pic_info = weibo_info['pics']
pic_list = [pic['large']['url'] for pic in pic_info]
pics = ','.join(pic_list)
else:
pics = ''
return pics
def get_live_photo(self, weibo_info):
"""获取live photo中的视频url"""
live_photo_list = []
live_photo = weibo_info.get('pic_video')
if live_photo:
prefix = 'https://video.weibo.com/media/play?livephoto=//us.sinaimg.cn/'
for i in live_photo.split(','):
if len(i.split(':')) == 2:
url = prefix + i.split(':')[1] + '.mov'
live_photo_list.append(url)
return live_photo_list
def get_video_url(self, weibo_info):
"""获取微博视频url"""
video_url = ''
video_url_list = []
if weibo_info.get('page_info'):
if weibo_info['page_info'].get('media_info') and weibo_info[
'page_info'].get('type') == 'video':
media_info = weibo_info['page_info']['media_info']
video_url = media_info.get('mp4_720p_mp4')
if not video_url:
video_url = media_info.get('mp4_hd_url')
if not video_url:
video_url = media_info.get('mp4_sd_url')
if not video_url:
video_url = media_info.get('stream_url_hd')
if not video_url:
video_url = media_info.get('stream_url')
if video_url:
video_url_list.append(video_url)
live_photo_list = self.get_live_photo(weibo_info)
if live_photo_list:
video_url_list += live_photo_list
return ';'.join(video_url_list)
def download_one_file(self, url, file_path, type, weibo_id):
"""下载单个文件(图片/视频)"""
try:
if not os.path.isfile(file_path):
s = requests.Session()
s.mount(url, HTTPAdapter(max_retries=5))
downloaded = s.get(url, cookies=self.cookie, timeout=(5, 10))
with open(file_path, 'wb') as f:
f.write(downloaded.content)
except Exception as e:
error_file = self.get_filepath(
type) + os.sep + 'not_downloaded.txt'
with open(error_file, 'ab') as f:
url = str(weibo_id) + ':' + url + '\n'
f.write(url.encode(sys.stdout.encoding))
print('Error: ', e)
traceback.print_exc()
def handle_download(self, file_type, file_dir, urls, w):
"""处理下载相关操作"""
file_prefix = w['created_at'][:11].replace('-', '') + '_' + str(
w['id'])
if file_type == 'img':
if ',' in urls:
url_list = urls.split(',')
for i, url in enumerate(url_list):
file_suffix = url[url.rfind('.'):]
file_name = file_prefix + '_' + str(i + 1) + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(url, file_path, file_type, w['id'])
else:
file_suffix = urls[urls.rfind('.'):]
file_name = file_prefix + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(urls, file_path, file_type, w['id'])
else:
file_suffix = '.mp4'
if ';' in urls:
url_list = urls.split(';')
if url_list[0].endswith('.mov'):
file_suffix = '.mov'
for i, url in enumerate(url_list):
file_name = file_prefix + '_' + str(i + 1) + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(url, file_path, file_type, w['id'])
else:
if urls.endswith('.mov'):
file_suffix = '.mov'
file_name = file_prefix + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(urls, file_path, file_type, w['id'])
def download_files(self, file_type, weibo_type):
"""下载文件(图片/视频)"""
try:
describe = ''
if file_type == 'img':
describe = u'图片'
key = 'pics'
else:
describe = u'视频'
key = 'video_url'
if weibo_type == 'original':
describe = u'原创微博' + describe
else:
describe = u'转发微博' + describe
print(u'即将进行%s下载' % describe)
file_dir = self.get_filepath(file_type)
file_dir = file_dir + os.sep + describe
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
for w in tqdm(self.weibo, desc='Download progress'):
if weibo_type == 'retweet':
if w.get('retweet'):
w = w['retweet']
else:
continue
if w.get(key):
self.handle_download(file_type, file_dir, w.get(key), w)
print(u'%s下载完毕,保存路径:' % describe)
print(file_dir)
except Exception as e:
print('Error: ', e)
traceback.print_exc()
def get_location(self, selector):
"""获取微博发布位置"""
location_icon = 'timeline_card_small_location_default.png'
span_list = selector.xpath('//span')
location = ''
for i, span in enumerate(span_list):
if span.xpath('img/@src'):
if location_icon in span.xpath('img/@src')[0]:
location = span_list[i + 1].xpath('string(.)')
break
return location
def get_topics(self, selector):
"""获取参与的微博话题"""
span_list = selector.xpath("//span[@class='surl-text']")
topics = ''
topic_list = []
for span in span_list:
text = span.xpath('string(.)')
if len(text) > 2 and text[0] == '#' and text[-1] == '#':
topic_list.append(text[1:-1])
if topic_list:
topics = ','.join(topic_list)
return topics
def get_at_users(self, selector):
"""获取@用户"""
a_list = selector.xpath('//a')
at_users = ''
at_list = []
for a in a_list:
if '@' + a.xpath('@href')[0][3:] == a.xpath('string(.)'):
at_list.append(a.xpath('string(.)')[1:])
if at_list:
at_users = ','.join(at_list)
return at_users
def string_to_int(self, string):
"""字符串转换为整数"""
if isinstance(string, int):
return string
elif string.endswith(u'万+'):
string = int(string[:-2] + '0000')
elif string.endswith(u'万'):
string = int(string[:-1] + '0000')
return int(string)
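    # Worked examples of the conversion above (derived directly from the code):
    #   string_to_int(u'3万')   -> 30000
    #   string_to_int(u'10万+') -> 100000
    #   string_to_int('42')     -> 42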
def standardize_date(self, created_at):
"""标准化微博发布时间"""
if u"刚刚" in created_at:
created_at = datetime.now().strftime("%Y-%m-%d")
elif u"分钟" in created_at:
minute = created_at[:created_at.find(u"分钟")]
minute = timedelta(minutes=int(minute))
created_at = (datetime.now() - minute).strftime("%Y-%m-%d")
elif u"小时" in created_at:
hour = created_at[:created_at.find(u"小时")]
hour = timedelta(hours=int(hour))
created_at = (datetime.now() - hour).strftime("%Y-%m-%d")
elif u"昨天" in created_at:
day = timedelta(days=1)
created_at = (datetime.now() - day).strftime("%Y-%m-%d")
elif created_at.count('-') == 1:
year = datetime.now().strftime("%Y")
created_at = year + "-" + created_at
return created_at
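    # Illustrative mappings (relative to the time the crawler runs):
    #   u'刚刚'   -> today's date            u'5分钟' -> date as of 5 minutes ago
    #   u'3小时'  -> date as of 3 hours ago  u'昨天'  -> yesterday's date
    #   u'02-14' -> current year + '-02-14'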
def standardize_info(self, weibo):
"""标准化信息,去除乱码"""
for k, v in weibo.items():
if 'bool' not in str(type(v)) and 'int' not in str(
type(v)) and 'list' not in str(
type(v)) and 'long' not in str(type(v)):
weibo[k] = v.replace(u"\u200b", "").encode(
sys.stdout.encoding, "ignore").decode(sys.stdout.encoding)
return weibo
def parse_weibo(self, weibo_info):
weibo = OrderedDict()
if weibo_info['user']:
weibo['user_id'] = weibo_info['user']['id']
weibo['screen_name'] = weibo_info['user']['screen_name']
else:
weibo['user_id'] = ''
weibo['screen_name'] = ''
weibo['id'] = int(weibo_info['id'])
weibo['bid'] = weibo_info['bid']
        text_body = weibo_info['text']
        selector = etree.HTML(text_body)
        weibo['text'] = selector.xpath('string(.)')  # reuse the already-parsed tree instead of parsing twice
weibo['pics'] = self.get_pics(weibo_info)
weibo['video_url'] = self.get_video_url(weibo_info)
weibo['location'] = self.get_location(selector)
weibo['created_at'] = weibo_info['created_at']
weibo['source'] = weibo_info['source']
weibo['attitudes_count'] = self.string_to_int(
weibo_info.get('attitudes_count', 0))
weibo['comments_count'] = self.string_to_int(
weibo_info.get('comments_count', 0))
weibo['reposts_count'] = self.string_to_int(
weibo_info.get('reposts_count', 0))
weibo['topics'] = self.get_topics(selector)
weibo['at_users'] = self.get_at_users(selector)
return self.standardize_info(weibo)
def print_user_info(self):
"""打印用户信息"""
print('+' * 100)
print(u'用户信息')
print(u'用户id:%s' % self.user['id'])
print(u'用户昵称:%s' % self.user['screen_name'])
gender = u'女' if self.user['gender'] == 'f' else u'男'
print(u'性别:%s' % gender)
print(u'微博数:%d' % self.user['statuses_count'])
print(u'粉丝数:%d' % self.user['followers_count'])
print(u'关注数:%d' % self.user['follow_count'])
if self.user.get('verified_reason'):
print(self.user['verified_reason'])
print(self.user['description'])
print('+' * 100)
def print_one_weibo(self, weibo):
"""打印一条微博"""
print(u'微博id:%d' % weibo['id'])
print(u'微博正文:%s' % weibo['text'])
print(u'原始图片url:%s' % weibo['pics'])
print(u'微博位置:%s' % weibo['location'])
print(u'发布时间:%s' % weibo['created_at'])
print(u'发布工具:%s' % weibo['source'])
print(u'点赞数:%d' % weibo['attitudes_count'])
print(u'评论数:%d' % weibo['comments_count'])
print(u'转发数:%d' % weibo['reposts_count'])
print(u'话题:%s' % weibo['topics'])
print(u'@用户:%s' % weibo['at_users'])
def print_weibo(self, weibo):
"""打印微博,若为转发微博,会同时打印原创和转发部分"""
if weibo.get('retweet'):
print('*' * 100)
print(u'转发部分:')
self.print_one_weibo(weibo['retweet'])
print('*' * 100)
print(u'原创部分:')
self.print_one_weibo(weibo)
print('-' * 120)
def get_one_weibo(self, info):
"""获取一条微博的全部信息"""
try:
weibo_info = info['mblog']
weibo_id = weibo_info['id']
retweeted_status = weibo_info.get('retweeted_status')
is_long = weibo_info.get('isLongText')
            if retweeted_status:  # retweet
retweet_id = retweeted_status.get('id')
is_long_retweet = retweeted_status.get('isLongText')
if is_long:
weibo = self.get_long_weibo(weibo_id)
if not weibo:
weibo = self.parse_weibo(weibo_info)
else:
weibo = self.parse_weibo(weibo_info)
if is_long_retweet:
retweet = self.get_long_weibo(retweet_id)
if not retweet:
retweet = self.parse_weibo(retweeted_status)
else:
retweet = self.parse_weibo(retweeted_status)
retweet['created_at'] = self.standardize_date(
retweeted_status['created_at'])
weibo['retweet'] = retweet
            else:  # original weibo
if is_long:
weibo = self.get_long_weibo(weibo_id)
if not weibo:
weibo = self.parse_weibo(weibo_info)
else:
weibo = self.parse_weibo(weibo_info)
weibo['created_at'] = self.standardize_date(
weibo_info['created_at'])
return weibo
except Exception as e:
print("Error: ", e)
traceback.print_exc()
def is_pinned_weibo(self, info):
"""判断微博是否为置顶微博"""
weibo_info = info['mblog']
title = weibo_info.get('title')
if title and title.get('text') == u'置顶':
return True
else:
return False
def get_one_page(self, page):
"""获取一页的全部微博"""
try:
js = self.get_weibo_json(page)
if js['ok']:
weibos = js['data']['cards']
for w in weibos:
if w['card_type'] == 9:
wb = self.get_one_weibo(w)
if wb:
if wb['id'] in self.weibo_id_list:
continue
created_at = datetime.strptime(
wb['created_at'], "%Y-%m-%d")
since_date = datetime.strptime(
self.since_date, "%Y-%m-%d")
if created_at < since_date:
if self.is_pinned_weibo(w):
continue
else:
return True
if (not self.filter) or (
'retweet' not in wb.keys()):
self.weibo.append(wb)
self.weibo_id_list.append(wb['id'])
self.got_count += 1
self.print_weibo(wb)
except Exception as e:
print("Error: ", e)
traceback.print_exc()
def get_page_count(self):
"""获取微博页数"""
try:
weibo_count = self.user['statuses_count']
page_count = int(math.ceil(weibo_count / 10.0))
return page_count
except KeyError:
sys.exit(u'程序出错,错误原因可能为以下两者:\n'
u'1.user_id不正确;\n'
u'2.此用户微博可能需要设置cookie才能爬取。\n'
u'解决方案:\n'
u'请参考\n'
u'https://github.com/dataabc/weibo-crawler#如何获取user_id\n'
u'获取正确的user_id;\n'
u'或者参考\n'
u'https://github.com/dataabc/weibo-crawler#3程序设置\n'
u'中的“设置cookie”部分设置cookie信息')
def get_write_info(self, wrote_count):
"""获取要写入的微博信息"""
write_info = []
for w in self.weibo[wrote_count:]:
wb = OrderedDict()
for k, v in w.items():
if k not in ['user_id', 'screen_name', 'retweet']:
if 'unicode' in str(type(v)):
v = v.encode('utf-8')
wb[k] = v
if not self.filter:
if w.get('retweet'):
wb['is_original'] = False
for k2, v2 in w['retweet'].items():
if 'unicode' in str(type(v2)):
v2 = v2.encode('utf-8')
wb['retweet_' + k2] = v2
else:
wb['is_original'] = True
write_info.append(wb)
return write_info
def get_filepath(self, type):
"""获取结果文件路径"""
try:
file_dir = os.path.split(
os.path.realpath(__file__)
)[0] + os.sep + 'weibo' + os.sep + self.user['screen_name']
if type == 'img' or type == 'video':
file_dir = file_dir + os.sep + type
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
if type == 'img' or type == 'video':
return file_dir
file_path = file_dir + os.sep + self.user_id + '.' + type
return file_path
except Exception as e:
print('Error: ', e)
traceback.print_exc()
def get_result_headers(self):
"""获取要写入结果文件的表头"""
result_headers = [
'id', 'bid', '正文', '原始图片url', '视频url', '位置', '日期', '工具', '点赞数',
'评论数', '转发数', '话题', '@用户'
]
if not self.filter:
result_headers2 = ['是否原创', '源用户id', '源用户昵称']
result_headers3 = ['源微博' + r for r in result_headers]
result_headers = result_headers + result_headers2 + result_headers3
return result_headers
def write_csv(self, wrote_count):
"""将爬到的信息写入csv文件"""
write_info = self.get_write_info(wrote_count)
result_headers = self.get_result_headers()
result_data = [w.values() for w in write_info]
if sys.version < '3': # python2.x
with open(self.get_filepath('csv'), 'ab') as f:
f.write(codecs.BOM_UTF8)
writer = csv.writer(f)
if wrote_count == 0:
writer.writerows([result_headers])
writer.writerows(result_data)
else: # python3.x
with open(self.get_filepath('csv'),
'a',
encoding='utf-8-sig',
newline='') as f:
writer = csv.writer(f)
if wrote_count == 0:
writer.writerows([result_headers])
writer.writerows(result_data)
print(u'%d条微博写入csv文件完毕,保存路径:' % self.got_count)
print(self.get_filepath('csv'))
def update_json_data(self, data, weibo_info):
"""更新要写入json结果文件中的数据,已经存在于json中的信息更新为最新值,不存在的信息添加到data中"""
data['user'] = self.user
if data.get('weibo'):
            is_new = 1  # whether all weibos to be written are new, i.e. none overlap with the data already in the json
for old in data['weibo']:
if weibo_info[-1]['id'] == old['id']:
is_new = 0
break
if is_new == 0:
for new in weibo_info:
flag = 1
for i, old in enumerate(data['weibo']):
if new['id'] == old['id']:
data['weibo'][i] = new
flag = 0
break
if flag:
data['weibo'].append(new)
else:
data['weibo'] += weibo_info
else:
data['weibo'] = weibo_info
return data
def write_json(self, wrote_count):
"""将爬到的信息写入json文件"""
data = {}
path = self.get_filepath('json')
if os.path.isfile(path):
with codecs.open(path, 'r', encoding="utf-8") as f:
data = json.load(f)
weibo_info = self.weibo[wrote_count:]
data = self.update_json_data(data, weibo_info)
with codecs.open(path, 'w', encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False)
print(u'%d条微博写入json文件完毕,保存路径:' % self.got_count)
print(path)
def info_to_mongodb(self, collection, info_list):
"""将爬取的信息写入MongoDB数据库"""
try:
import pymongo
except ImportError:
sys.exit(u'系统中可能没有安装pymongo库,请先运行 pip install pymongo ,再运行程序')
try:
from pymongo import MongoClient
client = MongoClient()
db = client['weibo']
collection = db[collection]
if len(self.write_mode) > 1:
new_info_list = copy.deepcopy(info_list)
else:
new_info_list = info_list
for info in new_info_list:
if not collection.find_one({'id': info['id']}):
collection.insert_one(info)
else:
collection.update_one({'id': info['id']}, {'$set': info})
except pymongo.errors.ServerSelectionTimeoutError:
sys.exit(u'系统中可能没有安装或启动MongoDB数据库,请先根据系统环境安装或启动MongoDB,再运行程序')
def weibo_to_mongodb(self, wrote_count):
"""将爬取的微博信息写入MongoDB数据库"""
self.info_to_mongodb('weibo', self.weibo[wrote_count:])
print(u'%d条微博写入MongoDB数据库完毕' % self.got_count)
def mysql_create(self, connection, sql):
"""创建MySQL数据库或表"""
try:
with connection.cursor() as cursor:
cursor.execute(sql)
finally:
connection.close()
def mysql_create_database(self, mysql_config, sql):
"""创建MySQL数据库"""
try:
import pymysql
except ImportError:
sys.exit(u'系统中可能没有安装pymysql库,请先运行 pip install pymysql ,再运行程序')
try:
if self.mysql_config:
mysql_config = self.mysql_config
connection = pymysql.connect(**mysql_config)
self.mysql_create(connection, sql)
except pymysql.OperationalError:
sys.exit(u'系统中可能没有安装或正确配置MySQL数据库,请先根据系统环境安装或配置MySQL,再运行程序')
def mysql_create_table(self, mysql_config, sql):
"""创建MySQL表"""
import pymysql
if self.mysql_config:
mysql_config = self.mysql_config
mysql_config['db'] = 'weibo'
connection = pymysql.connect(**mysql_config)
self.mysql_create(connection, sql)
def mysql_insert(self, mysql_config, table, data_list):
"""向MySQL表插入或更新数据"""
import pymysql
if len(data_list) > 0:
keys = ', '.join(data_list[0].keys())
values = ', '.join(['%s'] * len(data_list[0]))
if self.mysql_config:
mysql_config = self.mysql_config
mysql_config['db'] = 'weibo'
connection = pymysql.connect(**mysql_config)
cursor = connection.cursor()
sql = """INSERT INTO {table}({keys}) VALUES ({values}) ON
DUPLICATE KEY UPDATE""".format(table=table,
keys=keys,
values=values)
update = ','.join([
" {key} = values({key})".format(key=key)
for key in data_list[0]
])
sql += update
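            # e.g. with columns id and text this builds:
            #   INSERT INTO weibo(id, text) VALUES (%s, %s) ON
            #   DUPLICATE KEY UPDATE id = values(id), text = values(text)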
try:
cursor.executemany(
sql, [tuple(data.values()) for data in data_list])
connection.commit()
except Exception as e:
connection.rollback()
print('Error: ', e)
traceback.print_exc()
finally:
connection.close()
def weibo_to_mysql(self, wrote_count):
"""将爬取的微博信息写入MySQL数据库"""
mysql_config = {
'host': 'localhost',
'port': 3306,
'user': 'root',
'password': '123456',
'charset': 'utf8mb4'
}
        # create the 'weibo' table
create_table = """
CREATE TABLE IF NOT EXISTS weibo (
id varchar(20) NOT NULL,
bid varchar(12) NOT NULL,
user_id varchar(20),
screen_name varchar(20),
text varchar(2000),
topics varchar(200),
at_users varchar(200),
pics varchar(1000),
video_url varchar(1000),
location varchar(100),
created_at DATETIME,
source varchar(30),
attitudes_count INT,
comments_count INT,
reposts_count INT,
retweet_id varchar(20),
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"""
self.mysql_create_table(mysql_config, create_table)
weibo_list = []
retweet_list = []
if len(self.write_mode) > 1:
info_list = copy.deepcopy(self.weibo[wrote_count:])
else:
info_list = self.weibo[wrote_count:]
for w in info_list:
if 'retweet' in w:
w['retweet']['retweet_id'] = ''
retweet_list.append(w['retweet'])
w['retweet_id'] = w['retweet']['id']
del w['retweet']
else:
w['retweet_id'] = ''
weibo_list.append(w)
        # insert or update weibo rows in the 'weibo' table
self.mysql_insert(mysql_config, 'weibo', retweet_list)
self.mysql_insert(mysql_config, 'weibo', weibo_list)
print(u'%d条微博写入MySQL数据库完毕' % self.got_count)
def write_data(self, wrote_count):
"""将爬到的信息写入文件或数据库"""
if self.got_count > wrote_count:
if 'csv' in self.write_mode:
self.write_csv(wrote_count)
if 'json' in self.write_mode:
self.write_json(wrote_count)
if 'mysql' in self.write_mode:
self.weibo_to_mysql(wrote_count)
if 'mongo' in self.write_mode:
self.weibo_to_mongodb(wrote_count)
def get_pages(self):
"""获取全部微博"""
self.get_user_info()
page_count = self.get_page_count()
wrote_count = 0
self.print_user_info()
page1 = 0
random_pages = random.randint(1, 5)
for page in tqdm(range(1, page_count + 1), desc='Progress'):
print(u'第%d页' % page)
is_end = self.get_one_page(page)
if is_end:
break
            if page % 20 == 0:  # write to file every 20 pages crawled
self.write_data(wrote_count)
wrote_count = self.got_count
            # Add random waits to avoid being rate-limited. Crawling too fast
            # tends to trigger a temporary block (it lifts automatically after
            # a while); random waits mimic human behavior and lower that risk.
            # By default we sleep 6-10 seconds after every 1-5 pages; if you
            # still get blocked, increase the sleep time.
if page - page1 == random_pages and page < page_count:
sleep(random.randint(6, 10))
page1 = page
random_pages = random.randint(1, 5)
        self.write_data(wrote_count)  # write the remaining (fewer than 20 pages of) weibos to file
print(u'微博爬取完成,共爬取%d条微博' % self.got_count)
def get_user_list(self, file_name):
"""获取文件中的微博id信息"""
with open(file_name, 'rb') as f:
lines = f.read().splitlines()
lines = [line.decode('utf-8') for line in lines]
user_id_list = [
line.split(' ')[0] for line in lines
if len(line.split(' ')) > 0 and line.split(' ')[0].isdigit()
]
return user_id_list
def initialize_info(self, user_id):
"""初始化爬虫信息"""
self.weibo = []
self.user = {}
self.got_count = 0
self.user_id = user_id
self.weibo_id_list = []
def start(self):
"""运行爬虫"""
try:
for user_id in self.user_id_list:
self.initialize_info(user_id)
self.get_pages()
print(u'信息抓取完毕')
print('*' * 100)
if self.original_pic_download:
self.download_files('img', 'original')
if self.original_video_download:
self.download_files('video', 'original')
if not self.filter:
if self.retweet_pic_download:
self.download_files('img', 'retweet')
if self.retweet_video_download:
self.download_files('video', 'retweet')
except Exception as e:
print('Error: ', e)
traceback.print_exc()
def main():
try:
config_path = os.path.split(
os.path.realpath(__file__))[0] + os.sep + 'config.json'
if not os.path.isfile(config_path):
sys.exit(u'当前路径:%s 不存在配置文件config.json' %
(os.path.split(os.path.realpath(__file__))[0] + os.sep))
with open(config_path) as f:
config = json.loads(f.read())
wb = Weibo(config)
        wb.start()  # crawl the weibo information
except ValueError:
print(u'config.json 格式不正确,请参考 '
u'https://github.com/dataabc/weibo-crawler#3程序设置')
except Exception as e:
print('Error: ', e)
traceback.print_exc()
if __name__ == '__main__':
main()
|
[
"chillychen1991@gmail.com"
] |
chillychen1991@gmail.com
|
27b203253002e7765f61dc8f139c992c54414dc8
|
7c06959591e732bbefcc00f8559efca930c6a9ec
|
/app/dummy_data.py
|
44a20e1973e30834655a18f0028dde7b99e46248
|
[] |
no_license
|
walidzakaria/ESA-info
|
80b281e13065fd24b162632851062b65c5163610
|
698292a48a9064fdea9606e5a07eaef1348fad71
|
refs/heads/master
| 2020-04-07T18:48:10.677107
| 2018-11-30T21:36:58
| 2018-11-30T21:36:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from app import models, db
from app import stores
dummy_instructors = [
models.Instructor(name='walid', esa_number='es1'),
models.Instructor(name='donna', esa_number='es2'),
models.Instructor(name='samah', esa_number='es3'),
]
db.drop_all()
db.create_all()
test = stores.InstructorStore()
for i in dummy_instructors:
test.add(i)
|
[
"walidpiano@yahoo.com"
] |
walidpiano@yahoo.com
|
8d87c168ede5b99d6772434ac3becb6f62d261d6
|
33f5753d63b69926d54204dfa3081fb52bcbede5
|
/LoggingPackage/custom_logger.py
|
a32fcd88e82a13c711d7f0dd913e1885e040fa81
|
[] |
no_license
|
Vbhatnagr93/Automation-framework
|
c178feff95831a22a271c673c71873c8dd05215f
|
4be8bca5fa54173e466c4fecff4dc9d977346a7e
|
refs/heads/master
| 2021-01-19T18:28:14.860133
| 2017-08-23T04:28:15
| 2017-08-23T04:28:15
| 101,137,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
import inspect
import logging
def customLogger(logLevel):
loggerName = inspect.stack()[1][3]
logger = logging.getLogger(loggerName)
#By default, log all messages
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(filename="{0}.log".format(loggerName), mode='w') #Use Streamhandler for console logging
fileHandler.setLevel(logLevel)
formatter = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S %p')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
return logger
|
[
"vivek.bhatnagar93@gmail.com"
] |
vivek.bhatnagar93@gmail.com
|
f1fe3f7f304efeaaea37fbc13f6fbf2508adc188
|
8b648f1b20c973d73e461c53fea50f8b01262fc9
|
/code_your_own_quiz/Code_your_own_Quiz.py
|
19cec73990c4d4b6d93e32ee6b5f5c85cd07f01e
|
[] |
no_license
|
SJ029626/Code-Your-Own-Quiz
|
eba336ed723841465435a260d036178027009059
|
f032b4b55f616988bba0addb1bcceebe4d8eea35
|
refs/heads/master
| 2020-03-29T20:22:55.082755
| 2018-09-25T18:15:35
| 2018-09-25T18:15:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,336
|
py
|
#!/usr/bin/python
from string import Template
#level 1 easy
level_1 = '''
Python is a widely used $answ1-level programming language for general-purpose $answ2, created by Guido van Rossum and first released in 1991.
An interpreted language, Python has a design philosophy which emphasizes code readability (notably using whitespace indentation to delimit code
blocks rather than curly brackets or keywords), and a syntax which allows $answ3 to express concepts in fewer lines of code than
might be used in languages such as C++ or Java.The language provides constructs
intended to enable writing clear $answ4 on both a small and large scale.
'''
# level 2 medium
level_2 = '''
C was originally developed by $answ1 Ritchie between 1969 and 1973 at Bell Labs, and used to re-implement the Unix operating system.
It has since become one of the most widely used programming languages of all time, with
C compilers from various vendors available for the majority of existing computer architectures and operating systems. C has been
standardized by the American National Standards Institute (ANSI) since $answ2 (see ANSI C) and subsequently
by the International Organization for Standardization (ISO).
K&R introduced several language features:
Standard $answ3 library
long int data type
unsigned int data type
Compound assignment operators of the form =op (such as =-) were changed to the form op= (that is, -=) to remove the semantic ambiguity created
by constructs such as i=-10, which had been interpreted as i =- 10 (decrement i by 10) instead of the possibly intended i = -10 (let i be -10).
Even after the publication of the 1989 ANSI standard, for many years K&R C was still considered the "lowest common denominator" to which
C programmers restricted themselves when maximum portability was desired,
since many older compilers were still in use, and because carefully written $answ4 C code can be legal Standard C as well.
'''
# level 3 hard
level_3 = '''
Facebook is an American for-profit corporation and an online social media and social networking service based in Menlo Park, California.
The Facebook website was launched on February 4, 2004, by Mark Zuckerberg, along with fellow $answ1 College students and roommates,
$answ2, Andrew McCollum, Dustin Moskovitz, and Chris Hughes.
The founders had initially limited the website's membership to Harvard students; however,
later they expanded it to higher education institutions in the Boston area, the Ivy League schools, and Stanford University.
Facebook gradually added support for students at various other universities, and eventually to high school students as well.
Since 2006, anyone age $answ3 and older has been allowed to become a registered user of Facebook, though variations exist in the minimum age requirement,
depending on applicable local laws. The Facebook name comes from the face book directories often given to United States university students.
Facebook may be accessed by a large range of $answ4, laptops, tablet computers, and smartphones over the
Internet and mobile networks. After registering to use the site, users can create a user profile indicating their name,
occupation, schools attended and so on. Users can add other users as "friends", exchange messages, post status updates and digital photos,
share digital videos and links, use various software applications ("apps"), and receive notifications when others update their profiles or make posts.
Additionally, users may join common-interest user groups organized by workplace, school, hobbies or other topics, and categorize their friends into lists
such as "People From Work" or "Close Friends". In groups, editors can pin posts to top. Additionally, users can complain about or block unpleasant people.
Because of the large volume of data that users submit to the service, Facebook has come under scrutiny for its privacy policies.
Facebook makes most of its revenue from advertisements which appear onscreen.
'''
#level 4 very hard
level_4= '''
Google is an American multinational technology company specializing in Internet-related services and products.
These include online advertising technologies, $answ1, cloud computing, software, and hardware. Google was founded in 1998 by
Larry Page and Sergey Brin while they were Ph.D. students at Stanford University, in California. Together, they own about 14 percent of its
shares, and control 56 percent of the stockholder voting power through supervoting stock. They incorporated Google as a privately held company on
September 4, 1998. An initial public offering (IPO) took place on August 19, 2004, and Google moved to its new headquarters in $answ2,
California, nicknamed the Googleplex. In August 2015, Google announced plans to reorganize its various interests as a conglomerate
called Alphabet Inc. Google, Alphabet's leading subsidiary, will continue to be the umbrella company for Alphabet's Internet interests.
Upon completion of the restructure, Sundar Pichai became CEO of Google, replacing $answ3, who became CEO of Alphabet.
Rapid growth since incorporation has triggered a chain of products, acquisitions, and partnerships beyond Google's core search engine
(Google Search). It offers services designed for work and productivity (Google Docs, Sheets and Slides), email (Gmail/Inbox),
scheduling and time management (Google Calendar), cloud storage (Google Drive), social networking (Google+), instant messaging and
video chat (Google Allo/Duo), language translation (Google Translate), mapping and turn-by-turn navigation (Google Maps/Waze), video sharing (YouTube),
notetaking (Google Keep), and photo organizing and editing (Google Photos). The company leads the development of the Android mobile operating system,
the Google Chrome web browser, and Chrome OS, a lightweight operating system based on the Chrome browser. Google has moved increasingly into hardware;
from 2010 to 2015, it partnered with major electronics manufacturers in the production of its Nexus devices, and in October 2016, it released
multiple hardware products (including the Google Pixel smartphone, Home smart speaker, Wifi mesh wireless router, and Daydream View virtual reality headset).
The new hardware chief, $answ4, stated: "a lot of the innovation that we want to do now ends up requiring controlling the end-to-end user experience".
Google has also experimented with becoming an Internet carrier. In February 2010, it announced Google Fiber, a fiber-optic infrastructure that was
installed in Kansas City; in April 2015, it launched Project Fi in the United States, combining Wi-Fi and cellular networks from different providers;
and in 2016, it announced the Google Station initiative to make public Wi-Fi around the world,
with initial deployment in India.
'''
substitution = {
'answ1': '-1-',
'answ2': '-2-',
'answ3': '-3-',
'answ4': '-4-'
}
# solution of level 1
solution1 = {
'answ1': 'high',
'answ2': 'programming',
'answ3': 'programmers',
'answ4': 'program'
}
# solution of level 2
solution2 = {
'answ1': 'Dennis',
'answ2': '1989',
'answ3': 'I/O',
'answ4': 'K&R'
}
# solution of level 3
solution3 = {
    'answ1': 'Harvard',
'answ2': 'Eduardo Saverin',
'answ3': '13',
'answ4': 'Desktop'
}
# solution of level 4
solution4 = {
'answ1': 'search',
'answ2': 'Mountain View',
'answ3': 'Larry Page',
'answ4': 'Rick Osterloh'
}
def check(answer, question, solution):
'''
answer: string --> answer that user typed
question: string --> the current question key in solution
solution: string --> the solution dictionary
return True if answer is the solution of the question
False if answer is not the solution
'''
if solution.get(question) != None and solution[question] == answer:
return True
else:
return False
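# Substitution sketch: Template(level_1).substitute({'answ1': 'high', 'answ2':
# 'programming', 'answ3': 'programmers', 'answ4': 'program'}) fills every
# $answN placeholder; the `substitution` dict above starts each blank as a
# visible marker ('-1-', '-2-', ...) until the player answers correctly.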
def get_level():
'''
ask the user which level to play,
get the corresponding level template and solution,
return a tuple (level, solution)
level: Template --> the corresponding string template of the user selected level
solution: dict --> the solution dictionary of the corresponding level
'''
print('Please choose a difficulty level from easy / medium / hard / very hard')
levelTemplate = None
solution = None
while(solution == None):
level = str(input())
if(level == 'easy'):
# easy --> level 1
levelTemplate = Template(level_1)
solution = solution1
elif(level == 'medium'):
# medium --> level 2
levelTemplate = Template(level_2)
solution = solution2
elif(level == 'hard'):
# hard --> level 3
levelTemplate = Template(level_3)
solution = solution3
elif(level == 'very hard'):
# hard --> level 4
levelTemplate = Template(level_4)
solution = solution4
else:
# user didn't type any valid level
print('Please choose a real level that actually exists!')
    print('You chose level ' + level)
return (levelTemplate, solution)
def main():
levelTemplate, solution = get_level()
current = 1
total = len(solution)
#string = levelTemplate.substitute(substitution)
#print(levelTemplate.substitute(substitution))
#print(get_level())
while current <= total:
        string = levelTemplate.substitute(substitution)
question = 'What should go in blank number ' + str(current) + '?'
print(string)
print(question)
        answer = input()
        fill = 'answ' + str(current)
        if check(answer, fill, solution):
            substitution[fill] = answer
            print('Right Answer!')
current += 1
else:
print('Wrong Answer!')
print('\n--------------------------------------------------------------------------------\n')
    print('Congratulations!!! You won!')
main()
print('Want to play again?')
print('y or n')
option=input()
if(option=='y'):
main()
else:
print("Thank You for playing the game")
|
[
"noreply@github.com"
] |
noreply@github.com
|
08abe221a6e2af6878de699a1d34b050881e8401
|
2b19f22b47dfd8c01ed2acf335a88ef3271de873
|
/IOI/difference.py
|
5b3c5f89ee7d33064fe73fa1fd734200918f80c5
|
[] |
no_license
|
Aminehassou/spaghetti-code
|
0e319bd4712d57bbea8e3714fd64698aa70719fd
|
f63c0596d5691bf2b2b3d622e8271983e10c0cfe
|
refs/heads/master
| 2023-01-04T11:59:43.668791
| 2019-12-17T22:28:04
| 2019-12-17T22:28:04
| 306,032,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
try:
while True:
a, b = (map(int, input().split()))
print (abs(a - b))
except EOFError:
pass
|
[
"japnoo@gmail.com"
] |
japnoo@gmail.com
|
6d706f8241a2a5b6d2b3ce4b097801335c94620c
|
93982df574e6ec73ae6a388a6f3657dd1034558b
|
/Scripts/tqdm-script.py
|
7bdb3aaa074e5f2468b03c6327f6114470a8c757
|
[
"MIT"
] |
permissive
|
robachkaya/LrnXPAnaToolbox
|
82f693ee029bfb8180e322c70563d06bc0fae3a0
|
626a48834be90fa88f1600d6deb8471025345c44
|
refs/heads/master
| 2022-12-11T14:44:42.534823
| 2020-09-12T15:20:30
| 2020-09-12T15:20:30
| 287,898,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
#!C:\Users\KU\LrnXPAnaToolbox\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'tqdm','console_scripts','tqdm'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'tqdm'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('tqdm', 'console_scripts', 'tqdm')())
|
[
"kayane-r@evidenceb.com"
] |
kayane-r@evidenceb.com
|
d59273261facac50fe7d7e864621450575693342
|
a1975777cc7440af16e1a4879db16c9297bbfa8b
|
/0x01-python-if_else_loops_functions/6-print_comb3.py
|
7180ee50a014f135d83aa1e923ee683510c3ce09
|
[] |
no_license
|
gorgyboy/holbertonschool-higher_level_programming
|
c604a29f58e800b2edaed5d17285cb7591d30c95
|
349f5c46fe571df97adf91c4dea9d7ebfbcea66e
|
refs/heads/master
| 2023-02-27T23:01:49.213122
| 2021-02-12T02:05:47
| 2021-02-12T02:05:47
| 292,097,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
#!/usr/bin/python3
for i in range(0, 10):
for j in range(0, 10):
if i == 8 and j == 9:
print('{:d}{:d}'.format(i, j))
elif j > i:
print('{:d}{:d}'.format(i, j), end=', ')
|
[
"jlrogb@gmail.com"
] |
jlrogb@gmail.com
|
6627eb8dd55746dc2174b3ba18fca002414f22d0
|
80b7edcff5de5cf8e090868923c9bc6a6415bea4
|
/spider_/sina/spider.py
|
7c31f0682695d7713bab05ccd379f9b87ad2e0a2
|
[] |
no_license
|
KonroyZhu/CHN-JPN-Relationship-1
|
808d0ce9f7232c88d123bcf51d0284ebd7520dbb
|
fe6860487d1d0b415f6d14779c4f7cc38000719b
|
refs/heads/master
| 2020-03-27T08:20:08.873413
| 2018-08-26T11:10:51
| 2018-08-26T11:10:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
from spider_.sina.GetPageLinks import URLGenerator,GetURLGroupByPage
from spider_.DBHelper import operate_with_sql
import requests
if __name__ == '__main__':
pageNum = 1
newsCount =1
# print(requests.get(URLGenerator(pageNum)).status_code)
# print(type(requests.get(URLGenerator(pageNum)).status_code))
while(requests.get(URLGenerator(pageNum)).status_code == 200):
dict_list = GetURLGroupByPage(URLGenerator(pageNum))
sql = "insert into sinalink (title,newsTime,link,src) VALUES (%s,%s,%s,%s)"
i = 1
for dic in dict_list:
print(dic)
sql = "insert into sinalink (title,newsTime,link,src) VALUES ('%s','%s','%s','%s')" % (dic["title"], dic["time"], dic["url"], dic["src"])
#print(sql)
operate_with_sql(sql)
print("第%d条新闻插入数据库成功!" %newsCount, "\n")
newsCount += 1
pageNum += 1
|
[
"476494643@qq.com"
] |
476494643@qq.com
|
5581f1877e0859073d8bad360dbf8d6e6b5ed449
|
43949d3f05bf1d1212cc25fd5766a47940723f7b
|
/generators/gen.py
|
5202bb3d33b604cfc3856de4de253eacca2d802f
|
[] |
no_license
|
asing177/python_programs
|
f6aa5b53b6f0b2d8c824c8b796ca77d8a1121110
|
dfa18f7ec6bd0eb29311a393e011d89dac1a7e26
|
refs/heads/master
| 2020-12-21T14:39:27.761117
| 2020-02-06T08:12:32
| 2020-02-06T08:12:32
| 236,462,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
def my_gen():
n = 1
print('This is printed first')
# Generator function contains yield statements
yield n
n += 1
print('This is printed second')
yield n
n += 1
print('This is printed at last')
yield n
a = my_gen()
next(a)
next(a)
next(a)
# The three yields are exhausted; this final call raises StopIteration.
next(a)
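# Idiomatic alternative (sketch): a for loop consumes the generator and
# handles StopIteration automatically.
# for value in my_gen():
#     print(value)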
|
[
"adityasingh27@hotmail.com"
] |
adityasingh27@hotmail.com
|
427ad4d206db8a5e4f376c716b47b039b82fba5a
|
033da72a51c76e5510a06be93229a547a538cf28
|
/Data Engineer with Python Track/03. Streamlined Data Ingestion with Pandas/Chapter/03. Importing Data from Databases/03-Selecting columns with SQL.py
|
9ce77ea8c2536cbf0133ba6d5c5008e87273de10
|
[] |
no_license
|
ikhwan1366/Datacamp
|
d5dcd40c1bfeb04248977014260936b1fb1d3065
|
7738614eaebec446842d89177ae2bc30ab0f2551
|
refs/heads/master
| 2023-03-06T13:41:06.522721
| 2021-02-17T22:41:54
| 2021-02-17T22:41:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
'''
Selecting columns with SQL
Datasets can contain columns that are not required for an analysis, like the weather table in data.db does. Some, such as elevation, are redundant, since all observations occurred at the same place, while others contain variables we are not interested in. After making a database engine, you'll write a query to SELECT only the date and temperature columns, and pass both to read_sql() to make a data frame of high and low temperature readings.
pandas has been loaded as pd, and create_engine() has been imported from sqlalchemy.
Note: The SQL checker is quite picky about column positions and expects fields to be selected in the specified order.
Instructions
100 XP
- Create a database engine for data.db.
- Write a SQL query that SELECTs the date, tmax, and tmin columns from the weather table.
- Make a data frame by passing the query and engine to read_sql() and assign the resulting data frame to temperatures.
'''
# Imports (stated as preloaded in the exercise environment; added here so
# the snippet runs standalone)
import pandas as pd
from sqlalchemy import create_engine
# Create database engine for data.db
engine = create_engine('sqlite:///data.db')
# Write query to get date, tmax, and tmin from weather
query = """
SELECT date,
tmax,
tmin
FROM weather;
"""
# Make a data frame by passing query and engine to read_sql()
temperatures = pd.read_sql(query, engine)
# View the resulting data frame
print(temperatures)
|
[
"surel.chandrapratama@gmail.com"
] |
surel.chandrapratama@gmail.com
|
54bd6011ca70bedd611d59e1380e35e74113df67
|
cd8969e8fc3edfc472650adeb5d2a44c662e8ffd
|
/main_display.py
|
a349b7a1ecc8bfa66e97d4656d495439debe3537
|
[] |
no_license
|
xarvh/bluegenesis
|
9e5053e83802f9d1cd8ddfa9f9209bb70e81fe22
|
b4221f45f1b2bfca700b9566bc65da724cd8c0e3
|
refs/heads/master
| 2021-01-19T06:20:13.879369
| 2012-07-12T23:57:58
| 2012-07-12T23:57:58
| 5,011,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,933
|
py
|
#!/usr/bin/python -B
"""
Loads and display populations through their whole evolutionary history.
"""
from pyglet.gl import *
import pyglet.window.key as key
import random
import sys
import cell
class Gui:
def __init__(self, pops):
# list of populations
self.pops = pops
# selected generation
self.sgen = 0
# selected individual
self.sind = 0
# displayed body
self.dbody = cell.Body(self.pops[0][0])
# actual GUI
self.window = pyglet.window.Window()
self.keys = key.KeyStateHandler()
self.window.push_handlers(self.keys)
self.window.on_draw = self.draw
def update(latency):
self.keyboard_input()
self.dbody.update()
pyglet.clock.schedule_interval(update, 0.1)
def keyboard_input(self):
if self.keys[key.LEFT]: self.select_individual(-1)
if self.keys[key.RIGHT]: self.select_individual(+1)
if self.keys[key.UP]: self.select_generation(-1)
if self.keys[key.DOWN]: self.select_generation(+1)
# if symbol == key.s: screenshot.take()
# if symbol == key.ENTER:
# print 'saving genome'
# open('new_genomes', 'ab').write(sw.body.genome + '\n')
def reset_body(self):
self.dbody = cell.Body(self.pops[self.sgen][self.sind])
def select_individual(self, step):
self.sind += step
self.sind %= len(self.pops[self.sgen])
self.reset_body()
def select_generation(self, step):
self.sgen += step
self.sgen %= len(self.pops)
self.reset_body()
def draw(self):
self.window.clear()
# glMatrixMode interferes with text rendering
glPushMatrix()
#glMatrixMode(GL_PROJECTION)
glLoadIdentity()
w = self.window.width
h = self.window.height
glTranslated(w/2, h/2, 0)
glScaled(h/2, h/2, 1) # want uniform coordinates
#glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glScaled(.6, .6, 1)
self.dbody.draw()
glPopMatrix()
glPopMatrix()
# write label
text = "ind %d/%d, gen %d/%d, cells %d" % (
self.sind, len(self.pops[self.sgen]),
self.sgen, len(self.pops),
len(self.dbody)
)
glPushMatrix()
pyglet.text.Label(text,
font_name='Times New Roman', font_size=20,
x=5, y=5, anchor_x='left', anchor_y='bottom').draw()
glPopMatrix()
# perform operations that required drawing to be completed
def after_draw(self):
pass
#=========================================================================================
"""
class screenshot:
cnt = 0
@staticmethod
def take(name=None):
if not name:
screenshot.cnt += 1
name = 'shot%04d.png' % screenshot.cnt
pyglet.image.get_buffer_manager().get_color_buffer().save(name)
def after_draw(self):
if '--shot' in sys.argv:
screenshot.take('movie%04d.png' % self.total_cnt)
self.total_cnt += 1
if not self.free:
self.frames_cnt += 1
if self.frames_cnt >= self.frames_per_body:
if self.genomes:
self.next()
else:
sys.exit(0)
"""
# =============================================================================
def get_random_code():
return ''.join([ random.choice(cell.Cell.code_symbols) for i in xrange(1000)])
def main():
if len(sys.argv) > 1:
gui = Gui([gen.split('\n')[1:] for gen in open(sys.argv[1]).read().split('###')[1:]])
else:
gui = Gui([[get_random_code() for i in xrange(50)]])
pyglet.app.run()
if __name__ == '__main__':
main()
#EOF ==========================================================================
|
[
"francesco.orsenigo@gmail.com"
] |
francesco.orsenigo@gmail.com
|
2cfd9d1e429e9f0884a0381f4539ecb72222d40a
|
0cd461bdbe59eb80bf17483b1d20a3e6fbd52627
|
/api/permissions.py
|
f8d985a92fd5f646a58d9e6b498738e6fc47b4a0
|
[] |
no_license
|
cawirawa/SolarCarWebsiteBackend
|
f63420e81a4a02abf81c3db53b7fc7554e7c0bd2
|
c77e9badf2ec2bff20c16b4a25eff3303d78d5f0
|
refs/heads/master
| 2021-05-17T17:37:31.588220
| 2020-02-24T22:39:26
| 2020-02-24T22:39:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
from rest_framework import permissions
class IsAdminOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow admin to edit.
"""
def has_permission(self, request, view):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
return bool(request.user and request.user.is_staff)
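# Usage sketch (hypothetical viewset name; assumes DRF views exist elsewhere):
# class EventViewSet(viewsets.ModelViewSet):
#     permission_classes = [IsAdminOrReadOnly]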
|
[
"57200820+cawirawa@users.noreply.github.com"
] |
57200820+cawirawa@users.noreply.github.com
|
39071cb540b880168a7530ba80d4e213d0d935af
|
053c88dfbb2fcb3abe52360c00bd215d21d68702
|
/day07/part02.py
|
cf90574a44713af5af7d8c68e06ea45d1cc42eef
|
[] |
no_license
|
Berteun/adventofcode2020
|
9045ac243d439a81deb2ec89a5cb930a0ca7240d
|
d4c5f3840b1daf82c7b45486ebb94422ea1dc19c
|
refs/heads/master
| 2023-02-02T21:49:17.538146
| 2020-12-25T09:22:17
| 2020-12-25T09:22:17
| 319,113,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pprint
import re
from collections import defaultdict
re_target = re.compile(r'([0-9]+) (\w+ \w+)* bag')
re_line = re.compile(r'(\w+ \w+) bags contain (([0-9]+) (\w+ \w+)* bags?(, ([0-9]+) (\w+ \w+) bags?)*\.|no other bags\.)')
def parse_line(l):
graph = {}
match = re_line.match(l)
source = match[1]
graph[source] = {}
if "no other" not in match[2]:
targets = re_target.findall(match[2])
for (mult, target) in targets:
graph[source][target] = int(mult)
return graph
def read_input():
graph = {}
for l in open('input'):
graph.update(parse_line(l.strip()))
return graph
def count_bags(graph, source):
count = 1
if graph[source]:
for (target, weight) in graph[source].items():
count += weight * count_bags(graph, target)
return count
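# Worked example (hypothetical rules): if shiny gold holds 2 dark red bags and
# each dark red holds 2 dark orange bags (which hold nothing), then
# count_bags(graph, 'shiny gold') = 1 + 2*(1 + 2*1) = 7, and main prints 7 - 1 = 6.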
def main():
graph = read_input()
c = count_bags(graph, 'shiny gold')
    print(c - 1)  # subtract 1: count_bags includes the shiny gold bag itself
if __name__ == '__main__':
main()
|
[
"berteun@collect.capital"
] |
berteun@collect.capital
|
e85cf0f50af8331522b769f23625204cf82b9036
|
86b119ba6d1fdb9c52e52349f26afb03798c0dd2
|
/test/modules.py
|
b0d2135dcb6da0de6bf433e00c000d84e80f6b81
|
[] |
no_license
|
18520165/email-client-schoolapp
|
8c2141c0a0b1f9ff59639bd28f6ef9284404e1e9
|
537f82c391bb9e3c84e14c30889c0942ae7e4f63
|
refs/heads/master
| 2022-11-07T02:20:00.270742
| 2020-06-18T01:39:50
| 2020-06-18T01:39:50
| 274,380,927
| 0
| 0
| null | 2020-06-23T10:50:11
| 2020-06-23T10:50:11
| null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
#import modules here
import tkinter
|
[
"phu.nguyenduc@protonmail.com"
] |
phu.nguyenduc@protonmail.com
|
736472c422eb4afbc5a6cb0d3490681b26a0055a
|
e2d3b55d6c13702897ac5fd8b5514b013908dec6
|
/test/testGPIO.py
|
7f8f3fab1f2c7dc7f1e16bb45cdbdaca7c4be6fb
|
[] |
no_license
|
Giacky91/TermoPi
|
5abf1201c25075594e289b883c9032af9eb24110
|
b459a3205630ee27f7b6cb8b3ae0a0ed3bb366b5
|
refs/heads/master
| 2021-01-09T06:31:13.074138
| 2017-02-05T15:44:27
| 2017-02-05T15:44:27
| 81,000,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
#!/usr/bin/env python
import os
import RPi.GPIO as GPIO
import time
import datetime
import sys
# Relay pin
FAN_PIN = 12
#set up pins
def GPIOsetup():
# GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(FAN_PIN, GPIO.OUT)
GPIOsetup()
while True:
GPIO.output(FAN_PIN, GPIO.HIGH)
time.sleep(1)
GPIO.output(FAN_PIN, GPIO.LOW)
time.sleep(1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
9341e2577663f6d4c526369034176c1df2fa5ec1
|
f9593837802294805fd938c09fec968e37539923
|
/manufacture/urls/__init__.py
|
361f41b8c6f0124ed4c91b3053c5b1b3ae638d05
|
[] |
no_license
|
aleksey-zhigulin/manufacture.azbuka-kamnya
|
9e046e1ca7e6f80bad3a387a32e90619caa8ce25
|
0817accc556fae2223ecedd1ee4d5ecfff53d1ab
|
refs/heads/master
| 2021-01-08T12:58:09.752917
| 2016-06-27T15:05:32
| 2016-06-27T15:05:32
| 61,612,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,038
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import * # NOQA
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
import forms_builder.forms.urls
from zinnia.sitemaps import TagSitemap
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from manufacture.views import rules_list
admin.autodiscover()
urlpatterns = patterns('',
url(r'^rosetta/', include('rosetta.urls')),
url(r'^blog/', include('zinnia.urls', namespace='zinnia')),
url(r'^comments/', include('django_comments.urls')),
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^forms/', include(forms_builder.forms.urls)),
url(r'^robots\.txt$', rules_list, name='robots_rule_list'),
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',
{'sitemaps': {
'cmspages': CMSSitemap,
'tags': TagSitemap,
'blog': EntrySitemap,
'categories': CategorySitemap,
}}),
url(r'^select2/', include('django_select2.urls')),
url(r'^', include('cms.urls')),
url(r'^stone/', include('shop.urls', namespace='shop')),
)
# This is only needed when using runserver.
if settings.DEBUG:
urlpatterns = patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', # NOQA
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
) + staticfiles_urlpatterns() + urlpatterns # NOQA
|
[
"Азбука Камня"
] |
Азбука Камня
|
333c48d27ec8d5b0ea5633bffadd6e27638c0522
|
315450354c6ddeda9269ffa4c96750783963d629
|
/CMSSW_7_0_4/src/Configuration/TotemCommon/python/__init__.py
|
6cf341a440a2c973942ba5aad7ebac154203e274
|
[] |
no_license
|
elizamelo/CMSTOTEMSim
|
e5928d49edb32cbfeae0aedfcf7bd3131211627e
|
b415e0ff0dad101be5e5de1def59c5894d7ca3e8
|
refs/heads/master
| 2021-05-01T01:31:38.139992
| 2017-09-12T17:07:12
| 2017-09-12T17:07:12
| 76,041,270
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/Configuration/TotemCommon/',1)[0])+'/cfipython/slc6_amd64_gcc481/Configuration/TotemCommon')
|
[
"eliza@cern.ch"
] |
eliza@cern.ch
|
abe23fc6fb7654cb65cd62fc3e96e7129a366a37
|
b1ad494c5124d9c95c16695f229d351b521d72ed
|
/coordinate_translator.py
|
b9e734386c86a510c6e4525e98457f378577c984
|
[] |
no_license
|
Torbye/CoordinateTranslator
|
6b0e6d59eb08d3b251708bc2eaee52d7e30047f7
|
84e3a8096a793b1aaa205924d9b2791ad87c2334
|
refs/heads/master
| 2020-05-17T20:11:26.466560
| 2019-05-03T07:10:14
| 2019-05-03T07:10:14
| 183,937,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
# Written in Python3 by Torjus Bye, 2019. https://github.com/Torbye
import math
def car2pol(coordinate_list): # Converts cartesian coordinates to polar [X, Y] -> [Radius, Angle].
x=coordinate_list[0]
y=coordinate_list[1]
return [math.sqrt(math.pow(x,2)+math.pow(y,2)), math.degrees(math.atan2(y,x))]
def pol2car(coordinate_list): # Converts polar coordinates to cartesian [Radius, Angle] -> [X, Y].
r=coordinate_list[0]
a=math.radians(coordinate_list[1])
return [r*math.cos(a), r*math.sin(a)]
def sph2car(coordinate_list): # Converts spherical coordinates to cartesian [Radius, Inclination, Azimuth] -> [X, Y, Z].
    # Inclination is measured from the positive z-axis, consistent with car2sph below.
    r = coordinate_list[0]
    inclination = math.radians(coordinate_list[1])
    azimuth = math.radians(coordinate_list[2])
    x = r * math.sin(inclination) * math.cos(azimuth)
    y = r * math.sin(inclination) * math.sin(azimuth)
    z = r * math.cos(inclination)
    return [x, y, z]
def car2sph(coordinate_list): # Converts cartesian to spherical coordinates [X, Y, Z] -> [Radius, Inclination, Azimuth].
x = coordinate_list[0]
y = coordinate_list[1]
z = coordinate_list[2]
radius = math.sqrt(math.pow(x,2)+math.pow(y,2)+math.pow(z,2))
inclination = math.degrees(math.acos(z/radius))
azimuth = math.degrees(math.atan2(y,x))
return [radius, inclination, azimuth]
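# Round-trip sketch (hypothetical values): converting out and back should
# reproduce the inputs up to floating-point error.
if __name__ == "__main__":
    print(pol2car(car2pol([3.0, 4.0])))       # ~[3.0, 4.0]
    print(sph2car(car2sph([1.0, 1.0, 1.0])))  # ~[1.0, 1.0, 1.0]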
|
[
"noreply@github.com"
] |
noreply@github.com
|
fd5e4214aa5f2427a904e2b6c07f2bd9b0be7d30
|
0c138335422136116f79a5588c7ac87761c7b140
|
/Week 4/Following_Links_in_HTML_Using_Beautiful_Soup.py
|
c2a94654348c0ed5333787861230f061667ed73f
|
[] |
no_license
|
mahinanwar/Using-Python-To-Access-Web-Data
|
32769d905d40defd03eece6dacd4928a6772bf01
|
6db46965473ecfe519f8cfa93f8f6f16e44962c6
|
refs/heads/master
| 2022-11-13T20:05:02.453711
| 2020-06-24T17:19:15
| 2020-06-24T17:19:15
| 274,727,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
#In this assignment you will write a Python program that expands on
#http://www.py4e.com/code3/urllinks.py. The program will use urllib to read the
#HTML from the data files below, extract the href= vaues from the anchor tags,
#scan for a tag that is in a particular position relative to the first name in
#the list, follow that link and repeat the process a number of times and report
#the last name you find.
#We provide two files for this assignment. One is a sample file where we give
#you the name for your testing and the other is the actual data you need to
#process for the assignment
#Sample problem: Start at http://py4e-data.dr-chuck.net/known_by_Fikret.html
#Find the link at position 3 (the first name is 1). Follow that link. Repeat
#this process 4 times. The answer is the last name that you retrieve.
#Sequence of names: Fikret Montgomery Mhairade Butchi Anayah
#Last name in sequence: Anayah
#Actual problem: Start at: http://py4e-data.dr-chuck.net/known_by_Nathanial.html
#Find the link at position 18 (the first name is 1). Follow that link. Repeat
#this process 7 times. The answer is the last name that you retrieve.
#Hint: The first character of the name of the last page that you will load is: R
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
url = input('Enter Url: ')
count = int(input("Enter count: "))
position = int(input("Enter position:"))
for i in range(count):
html = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html,'html.parser')
tags = soup('a')
s = []
t = []
for tag in tags:
x = tag.get('href', None)
s.append(x)
y = tag.text
t.append(y)
print(s[position-1])
print(t[position-1])
url = s[position-1]
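# Trace of the sample problem above: starting at known_by_Fikret.html with
# count 4 and position 3, each pass prints the href and name at index
# position-1 and then follows that link, ending on the name 'Anayah'.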
|
[
"noreply@github.com"
] |
noreply@github.com
|
afb1aed5d1c621b18f1115384a24b8b8a23d2dc5
|
03b8ea6ab1b571ff9aff42083843d33b91d948a3
|
/complete-python-developer-2021/First sections/testing/tests.py
|
198f774951e7692f2a2737fcab4beb4cfe495eed
|
[] |
no_license
|
emanueldumitru/flask-bootcamp-marcial
|
c036362071511edc2bfb8474213b7374517b1dcb
|
ddc42cd8ec057ce1d13d6baeb211e06e9795f140
|
refs/heads/main
| 2023-08-22T17:16:41.597891
| 2021-10-11T19:48:50
| 2021-10-11T19:48:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# pylint
# pyflakes
# pep8
# pytest
# python -m unittest -v
import unittest
import main
class TestMain(unittest.TestCase):
def test_do_stuff(self):
test_param = 10
result = main.do_stuff(test_param)
self.assertEqual(result, 15)
unittest.main()
|
[
"emanuel.dumitru9@gmail.com"
] |
emanuel.dumitru9@gmail.com
|
9163c34c39e77bff0131e957efdc3df3b708d624
|
ad3b84b514e06cb93361c5761c50b89d5353c089
|
/urbanmap/apps.py
|
b30b424dd451ee4ca68bcbfb5817fabb69c1cd46
|
[] |
no_license
|
ndufrane/imiomap
|
d6bb9c8b077db8b323da850af7112f3d186e310c
|
76121d8f78200d914d30e8424f043207fd1085d0
|
refs/heads/master
| 2021-06-24T17:06:58.349070
| 2019-05-10T13:58:31
| 2019-05-10T13:58:31
| 161,181,416
| 0
| 1
| null | 2020-11-24T08:54:55
| 2018-12-10T13:44:13
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 96
|
py
|
from django.apps import AppConfig
class UrbanmapConfig(AppConfig):
name = 'urbanmap'
|
[
"dufrane.nicolas@gmail.com"
] |
dufrane.nicolas@gmail.com
|
4b5866d8bbf0ce95adcf8251e117486d1bad3dba
|
dacdebab897f9287f37a2e85c5705a926ddd36aa
|
/tests/profile/Snakefile
|
0b94737e2cc8387c702c84b7ed884b17ad3dfbc4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
snakemake/snakemake
|
5d4528193d87786d7b372ca7653ece302ff46965
|
27b224ed12448df8aebc7d1ff8f25e3bf7622232
|
refs/heads/main
| 2023-09-02T08:37:04.323976
| 2023-08-11T10:02:34
| 2023-08-11T10:02:34
| 212,840,200
| 1,941
| 536
|
MIT
| 2023-09-11T09:51:44
| 2019-10-04T14:58:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 358
|
rule all:
input: expand("{name}.d", name=range(1000))
rule:
output: "{name}.a"
shell: "touch {output}"
rule:
input: "{name}.a"
output: "{name}.b"
shell: "touch {output}"
rule:
input: "{name}.b"
output: "{name}.c"
shell: "touch {output}"
rule:
input: "{name}.c"
output: "{name}.d"
shell: "touch {output}"
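# The anonymous rules chain each target a -> b -> c -> d, so requesting
# {name}.d materializes all four files per name; `rule all` fans this out
# over 1000 names, which exercises the scheduler in this profiling test.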
|
[
"johannes.koester@uni-due.de"
] |
johannes.koester@uni-due.de
|
|
e9d73bb6b0922771279e7b81cdfdb17254747be0
|
1cecdf336a6952b208f30c2924eb0457953aa2d5
|
/rundaemon.py
|
5f6f63e831b3ce48126633c9456dc1d15dc3816b
|
[] |
no_license
|
alanemiliano/fiscalberry
|
6b941f235ffe4c5045b434628963cb105cabc25f
|
1a5ac93561b8fe60ae4946fe66acd6ddf76fdc30
|
refs/heads/master
| 2021-09-01T21:29:43.011322
| 2017-12-28T18:30:57
| 2017-12-28T18:30:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
#!/usr/bin/env python2.7
import os
import argparse
import daemon
import sys
import logging
from daemon import pidfile
from FiscalberryApp import FiscalberryApp
def do_something():
### This does the "work" of the daemon
fbserver = FiscalberryApp()
fbserver.start()
def start_daemon(pidf, logf):
### This launches the daemon in its context
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/fiscalberry_daemon.log')
logger.addHandler(handler)
rootpath = os.path.dirname(__file__)
### XXX pidfile is a context
with daemon.DaemonContext(
stdout=handler.stream,
stderr=handler.stream,
working_directory=rootpath,
umask=0o002,
pidfile=pidfile.TimeoutPIDLockFile(pidf),
files_preserve=[handler.stream]
) as context:
do_something()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PaxaPos Daemon Service")
parser.add_argument('-p', '--pid-file', default='/var/run/fiscalberry.pid')
parser.add_argument('-l', '--log-file', default='/var/log/fiscalberry.log')
args = parser.parse_args()
start_daemon(pidf=args.pid_file, logf=args.log_file)
|
[
"alevilar@gmail.com"
] |
alevilar@gmail.com
|
d485cc88de5469d66c7dbc503dbb0e3206144138
|
b1ffcbd977595bccf15dd56e965bda62867d1e10
|
/omrdatasettools/downloaders/PrintedMusicSymbolsDatasetDownloader.py
|
818cb93eca0478aa48e520f084374eba04cbd048
|
[
"CC-BY-NC-SA-4.0",
"GPL-2.0-only",
"CC-BY-SA-3.0",
"MIT",
"GPL-1.0-or-later",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"AGPL-3.0-only"
] |
permissive
|
fzalkow/OMR-Datasets
|
7ded5bb9278e47c84a16de01081876d6bb2e6dbe
|
c9e7a986199998d6a735875503e6dcce5fdf1193
|
refs/heads/master
| 2020-09-14T15:30:45.824800
| 2020-01-06T12:07:52
| 2020-01-06T12:07:52
| 223,169,792
| 0
| 0
|
MIT
| 2019-11-21T12:32:31
| 2019-11-21T12:32:30
| null |
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
import argparse
import os
from omrdatasettools.downloaders.DatasetDownloader import DatasetDownloader
class PrintedMusicSymbolsDatasetDownloader(DatasetDownloader):
""" Loads the Printed Music Symbols dataset
https://github.com/apacha/PrintedMusicSymbolsDataset
Copyright 2017 by Alexander Pacha under MIT license
"""
def get_dataset_download_url(self) -> str:
# If this link does not work anymore, find the images at https://github.com/apacha/PrintedMusicSymbolsDataset
return "https://github.com/apacha/OMR-Datasets/releases/download/datasets/PrintedMusicSymbolsDataset.zip"
def get_dataset_filename(self) -> str:
return "PrintedMusicSymbolsDataset.zip"
def download_and_extract_dataset(self, destination_directory: str):
if not os.path.exists(self.get_dataset_filename()):
print("Downloading Printed Music Symbol dataset...")
self.download_file(self.get_dataset_download_url(), self.get_dataset_filename())
print("Extracting Printed Music Symbol dataset...")
absolute_path_to_temp_folder = os.path.abspath('PrintedMusicSymbolsDataset')
self.extract_dataset(absolute_path_to_temp_folder)
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "PrintedMusicSymbolsDataset"),
os.path.abspath(destination_directory))
self.clean_up_temp_directory(absolute_path_to_temp_folder)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset_directory",
type=str,
default="../data/printed_images",
help="The directory, where the extracted dataset will be copied to")
flags, unparsed = parser.parse_known_args()
dataset = PrintedMusicSymbolsDatasetDownloader()
dataset.download_and_extract_dataset(flags.dataset_directory)
|
[
"alexander.pacha@gmail.com"
] |
alexander.pacha@gmail.com
|
f0792bb6b2ff080f26d50f8b8461833c96f6caba
|
2a5e4c1307e6e961eafeaeb454187453c3a525ea
|
/md5.py
|
8bc080904d0c07b09a0beae19f84c2bb10f1e012
|
[] |
no_license
|
hoangkimzxc/MD-5-Dynamic-Salt-Hash
|
0ed57d5cacdc4849391c2366b1f74d670a153775
|
fc5efdab0862d53f5c7e115e4e1b08976d990417
|
refs/heads/main
| 2023-07-24T04:32:14.172062
| 2021-08-26T10:20:22
| 2021-08-26T10:20:22
| 388,497,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,316
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 21:46:02 2021
@author: hoang
"""
import struct
from enum import Enum
from math import (
floor,
sin,
)
from bitarray import bitarray
class MD5Buffer(Enum):
A = 0x67452301
B = 0xEFCDAB89
C = 0x98BADCFE
D = 0x10325476
class MD5(object):
_string = None
_buffers = {
MD5Buffer.A: None,
MD5Buffer.B: None,
MD5Buffer.C: None,
MD5Buffer.D: None,
}
@classmethod
def hash(cls, string):
cls._string = string
preprocessed_bit_array = cls._step_2(cls._step_1())
cls._step_3()
cls._step_4(preprocessed_bit_array)
return cls._step_5()
@classmethod
def _step_1(cls):
# Convert the string to a bit array.
bit_array = bitarray(endian="big")
bit_array.frombytes(cls._string.encode("utf-8"))
        # Pad the string with a 1 bit and as many 0 bits as required such that
        # the length of the bit array becomes congruent to 448 modulo 512.
        # Note that padding is always performed, even if the string's bit
        # length is already congruent to 448 modulo 512, which leads to a
        # new 512-bit message block.
bit_array.append(1)
while len(bit_array) % 512 != 448:
bit_array.append(0)
# For the remainder of the MD5 algorithm, all values are in
# little endian, so transform the bit array to little endian.
return bitarray(bit_array, endian="little")
@classmethod
def _step_2(cls, step_1_result):
# Extend the result from step 1 with a 64-bit little endian
# representation of the original message length (modulo 2^64).
length = (len(cls._string) * 8) % pow(2, 64)
length_bit_array = bitarray(endian="little")
length_bit_array.frombytes(struct.pack("<Q", length))
result = step_1_result.copy()
result.extend(length_bit_array)
return result
@classmethod
def _step_3(cls):
# Initialize the buffers to their default values.
for buffer_type in cls._buffers.keys():
cls._buffers[buffer_type] = buffer_type.value
@classmethod
def _step_4(cls, step_2_result):
# Define the four auxiliary functions that produce one 32-bit word.
F = lambda x, y, z: (x & y) | (~x & z)
G = lambda x, y, z: (x & z) | (y & ~z)
H = lambda x, y, z: x ^ y ^ z
I = lambda x, y, z: y ^ (x | ~z)
# Define the left rotation function, which rotates `x` left `n` bits.
rotate_left = lambda x, n: (x << n) | (x >> (32 - n))
# Define a function for modular addition.
modular_add = lambda a, b: (a + b) % pow(2, 32)
# Compute the T table from the sine function. Note that the
# RFC starts at index 1, but we start at index 0.
T = [floor(pow(2, 32) * abs(sin(i + 1))) for i in range(64)]
# The total number of 32-bit words to process, N, is always a
# multiple of 16.
N = len(step_2_result) // 32
# Process chunks of 512 bits.
for chunk_index in range(N // 16):
# Break the chunk into 16 words of 32 bits in list X.
start = chunk_index * 512
X = [step_2_result[start + (x * 32) : start + (x * 32) + 32] for x in range(16)]
# Convert the `bitarray` objects to integers.
X = [int.from_bytes(word.tobytes(), byteorder="little") for word in X]
# Make shorthands for the buffers A, B, C and D.
A = cls._buffers[MD5Buffer.A]
B = cls._buffers[MD5Buffer.B]
C = cls._buffers[MD5Buffer.C]
D = cls._buffers[MD5Buffer.D]
# Execute the four rounds with 16 operations each.
for i in range(4 * 16):
if 0 <= i <= 15:
k = i
s = [7, 12, 17, 22]
temp = F(B, C, D)
elif 16 <= i <= 31:
k = ((5 * i) + 1) % 16
s = [5, 9, 14, 20]
temp = G(B, C, D)
elif 32 <= i <= 47:
k = ((3 * i) + 5) % 16
s = [4, 11, 16, 23]
temp = H(B, C, D)
elif 48 <= i <= 63:
k = (7 * i) % 16
s = [6, 10, 15, 21]
temp = I(B, C, D)
# The MD5 algorithm uses modular addition. Note that we need a
# temporary variable here. If we would put the result in `A`, then
# the expression `A = D` below would overwrite it. We also cannot
# move `A = D` lower because the original `D` would already have
# been overwritten by the `D = C` expression.
temp = modular_add(temp, X[k])
temp = modular_add(temp, T[i])
temp = modular_add(temp, A)
temp = rotate_left(temp, s[i % 4])
temp = modular_add(temp, B)
# Swap the registers for the next operation.
A = D
D = C
C = B
B = temp
# Update the buffers with the results from this chunk.
cls._buffers[MD5Buffer.A] = modular_add(cls._buffers[MD5Buffer.A], A)
cls._buffers[MD5Buffer.B] = modular_add(cls._buffers[MD5Buffer.B], B)
cls._buffers[MD5Buffer.C] = modular_add(cls._buffers[MD5Buffer.C], C)
cls._buffers[MD5Buffer.D] = modular_add(cls._buffers[MD5Buffer.D], D)
@classmethod
def _step_5(cls):
# Convert the buffers to little-endian.
A = struct.unpack("<I", struct.pack(">I", cls._buffers[MD5Buffer.A]))[0]
B = struct.unpack("<I", struct.pack(">I", cls._buffers[MD5Buffer.B]))[0]
C = struct.unpack("<I", struct.pack(">I", cls._buffers[MD5Buffer.C]))[0]
D = struct.unpack("<I", struct.pack(">I", cls._buffers[MD5Buffer.D]))[0]
# Output the buffers in lower-case hexadecimal format.
return f"{format(A, '08x')}{format(B, '08x')}{format(C, '08x')}{format(D, '08x')}"
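# Usage sketch (assumes the `bitarray` dependency is installed):
if __name__ == "__main__":
    # RFC 1321 test vector: MD5("abc") = 900150983cd24fb0d6963f7d28e17f72
    print(MD5.hash("abc"))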
|
[
"noreply@github.com"
] |
noreply@github.com
|
541e5fb2da0f4d116e9e305b03a30bc97c22062c
|
6176efac2f93f78c611fb1ecac2483133281c8d6
|
/1.tutorials/67.oops13.py
|
f346760451721b342b5de9eb79e73cfad916a5f4
|
[] |
no_license
|
BarinderSingh30/Python_Basic
|
f10f00e932a6533d1bb8bf9cd9783186fac1d513
|
64b8689cd9b20d64d6da3905b3752a7c7fb415ea
|
refs/heads/main
| 2023-06-23T08:09:33.217233
| 2021-07-21T16:03:14
| 2021-07-21T16:03:14
| 375,288,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Operator overloading and dunder methods
class A:
def __init__(self,name,salary,role) -> None:
self.name=name
self.salary=salary
self.role=role
    def __add__(self,other): # dunder method; enables operator overloading for +
return self.salary + other.salary
def __repr__(self) -> str:
return f'His name is {self.name}.\nHis salary is {self.salary}.\nHis role is {self.role}.\n'
def __str__(self) -> str:
return 'who is the boss? hmm'
emp1=A('Barinder',400,"programmer")
emp2=A('harsh',1000,'coder')
print(emp1 + emp2)
print(emp1)
print(repr(emp1))
|
[
"emailforbarinder@gmail.com"
] |
emailforbarinder@gmail.com
|
28cdce0f8159d0261b10e4c251a76ba3ad02c726
|
bd06b28588e4d9c9d615c319f05256f90d2b9173
|
/姜英利/class71/GuessNumber.py
|
8336807c8cb6aadcf50e307ddb6fabec8067f016
|
[] |
no_license
|
jiangyingli/python20190601
|
6baeeda9d6658353c0dd764e9913ae89dcdc873e
|
59e6c9a75cc2f3c9e09c4c2b9bba22dac51c347a
|
refs/heads/master
| 2021-06-28T08:27:37.561846
| 2020-11-30T13:42:33
| 2020-11-30T13:42:33
| 194,200,502
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
import random
# Insert coins to play an unlimited number of rounds: each guess costs 2 coins
# and a correct guess pays out 6 coins.
# You can quit at any time; your remaining coins are refunded on exit.
num = input("请投币")
coin = int(num)
rand = random.randint( 1,10 )
# Open question: how does the number of coins inserted relate to the number of rounds?
while(coin >= 2):
cmd = input("请输入一个值,范围(1-10),退出请按0!")
if(cmd=="0"):
print("退您"+str(coin)+"个游戏币,请收好!")
        break
coin = coin - 2
userinput = int(cmd)
if(userinput>rand):
print("大大大大了")
elif(userinput<rand):
print("小了")
    elif(userinput==rand):
        coin = coin + 6
        print("对了")
        rand = random.randint(1, 10)  # draw a new number so each round is a fresh guess
print("退出游戏!欢迎再来")
# ctrl+c ctrl+v ctrl+a tab shift+tab
|
[
"jiangyingli@126.com"
] |
jiangyingli@126.com
|
d486e86f158860f487fa6a1659cf36422f4530fe
|
f9ad8e434c4766bb29f3e1419dfb11361d70fd1e
|
/lab7_10.py
|
567c917fa8eb208fdc9bb98beb35f3e9e17dead7
|
[] |
no_license
|
Maria105/python_lab
|
3060ff26247c81253fe7d058e17d6ff7d88bad8e
|
5a235e2938b07db8b3dff1eaea57cd530df2d4a9
|
refs/heads/master
| 2020-03-28T10:27:59.928146
| 2019-01-11T14:01:48
| 2019-01-11T14:01:48
| 148,111,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
def input_text() -> str:
"""Input message: """
text = input('enter you text: ')
return (text)
def min_word(text: str) -> str:
"""Find smallest word"""
return min(text.split())
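# Note: min() compares strings lexicographically, e.g.
# min('b aa cc'.split()) == 'aa', which is not necessarily the shortest word.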
print(min_word(input_text()))
|
[
"noreply@github.com"
] |
noreply@github.com
|
8f92acd743de6224fa78192b2abb2e2385ad176b
|
ccda9baee5219204d425895d3c484bb781388011
|
/instagram/urls.py
|
09c4dcf019e33cab4163959446c3ffca169991b9
|
[
"MIT"
] |
permissive
|
JeremiahNgige/instagram-clone
|
fbe26f526d4d193903d6feeed602712d91ddabbe
|
c8cb10a9f3ba9ae09a584ccf957adf6b057672ca
|
refs/heads/master
| 2022-12-30T06:27:22.988220
| 2020-10-22T19:36:30
| 2020-10-22T19:36:30
| 304,614,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
"""instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('instaclone.urls'))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"brianjeremy123789@gmail.com"
] |
brianjeremy123789@gmail.com
|
31e0764953dcbe50ed8f4d2ad453bbeb18f03f34
|
0f573c8be4a665ccfabbfe81ee084d096a282414
|
/mysite/blog/admin.py
|
c13f5bd3f4c0e9b589f1285b777e8b3c09e3978d
|
[] |
no_license
|
dwilliams328/DjangoBlog
|
4be6ee486c08807798f7a6d9cfdac42e505237d0
|
9e067b27da74713ae321526b3d6de97ecc61a82d
|
refs/heads/master
| 2020-04-23T23:16:51.239893
| 2019-02-27T21:34:12
| 2019-02-27T21:34:12
| 171,530,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
from django.contrib import admin
# Import and register model to make it visible on the admin page.
from .models import Post
admin.site.register(Post)
|
[
"dwilliams@icstars.org"
] |
dwilliams@icstars.org
|
221197ebbc49db96c73981d99a32e9cf8e6e7e13
|
14078fd8a13a531ffd0575dc3b4d91b7a22492db
|
/hci.py
|
429f070055ff1f1403166d05fa55454fb0fe2442
|
[] |
no_license
|
peterliu/hci-view
|
db93de725707213d2bd428de786dc357da11fc53
|
da50432a9fbe9fdeb7c7e7533b40af4d14f4ec5a
|
refs/heads/master
| 2021-01-18T14:26:18.751047
| 2014-09-04T08:14:53
| 2014-09-04T08:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
import sys
from msg import msg_analyze
msg_db = []
def storeMsg(msg_time, msg_direction, msg_msg):
global msg_db
this_msg = {'time' : msg_time,
'direction' : msg_direction,
'msg' : msg_msg
}
msg_db += [this_msg]
def loadSrcFile(file_name, direction):
f_in = open(file_name)
lines = f_in.readlines()
    # message segmentation gap: 50 us
    # time_split = 5e-5
    # message segmentation gap: 200 us
    time_split = 2e-4
    # timestamp of the previous byte; updated after each byte is processed
    t = -100.0
    # message payload
    msg_data = []
    # message start time
    msg_time = -100
    # skip the header line
for line in lines[1:]:
q = line.split(',')
o_time = float(q[0])
o_date = int(q[1], 16)
        # split into messages on time gaps
if (o_time - t) > time_split:
if msg_time >= 0:
storeMsg(msg_time, direction, msg_data)
msg_time = o_time
msg_data = [o_date]
else:
msg_data += [o_date]
t = o_time
    # flush the last message
if msg_time >= 0:
storeMsg(msg_time, direction, msg_data)
f_in.close()
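# Assumed input format (inferred from the parsing above, not documented in the
# source): each line after the header is "time,hex_byte", e.g. "0.000123,A5".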
# load the input files
loadSrcFile('tx.txt', 'TX')
loadSrcFile('rx.txt', 'RX')
# sort messages by timestamp (Python 3: sorted() takes key=, not cmp=)
stored_msg_db = sorted(msg_db, key=lambda x: x['time'])
for msg in stored_msg_db:
    # print the message timestamp, direction, and payload bytes
    print("[%5.6f %s]" % (msg['time'], msg['direction']), end=' ')
    for i in msg['msg']:
        print("%02X" % i, end=' ')
    print()
    # if the command-line argument is '-d' or '-D', decode the message contents
if (len(sys.argv) == 2):
if (sys.argv[1] == '-D') or (sys.argv[1] == '-d'):
msg_analyze(msg['msg'])
|
[
"peter9882@gmail.com"
] |
peter9882@gmail.com
|
62b085c047b06c3239657128e85b386f92823e3f
|
9a3543fc4b82f1ce1037005853cfd658fa3e772c
|
/ex35.py
|
c74b5a3b844762cc434a9c803708678eb9086686
|
[] |
no_license
|
standbyme227/study_python1
|
44c0e357f395fc008b3343ad63bb1743d0c32ab4
|
f2b8516835397d3310a14f9f6bf006532b8fc5f9
|
refs/heads/master
| 2020-05-20T00:47:32.302453
| 2019-08-07T08:07:46
| 2019-08-07T08:07:46
| 185,294,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
from sys import exit
def 황금_방():
    print("This room is full of gold. How much do you take?")
    선택 = input("> ")
    # fix: the original tested isinstance(int(선택), int), which either raised
    # ValueError or was always True, so the else branch was unreachable
    if 선택.isdigit():
        액수 = int(선택)
    else:
        죽음("Man, learn to type a number.")
    if 액수 < 50:
        print("Nice, you're not greedy, you win!")
        exit(0)
    else:
        죽음("You greedy bastard!")
def 곰_방():
    print("There is a bear here.")
    print("The bear has a bunch of honey.")
    print("The fat bear is in front of another door.")
    print("How are you going to move the bear?")
    print("> take honey")
    print("> taunt bear")
    print("> open door")
    곰이_움직임 = False
    while True:
        선택 = input("> ")
        if 선택 == "take honey":
            죽음("The bear looks at you then slaps your face off.")
        elif 선택 == "taunt bear" and not 곰이_움직임:
            print("The bear has moved from the door.")
            print("You can go through it now.")
            곰이_움직임 = True
        elif 선택 == "taunt bear" and 곰이_움직임:
            죽음("The bear gets pissed off and chews your leg off.")
        elif 선택 == "open door" and 곰이_움직임:
            황금_방()
        else:
            print("I got no idea what that means.")
def 크툴루_방():
    print("Here you see the great evil Cthulhu.")
    print("He, it, whatever stares at you and you go insane.")
    print("Do you flee for your life or eat your head?")
    print("> flee")
    print("> eat")
    선택 = input("> ")
    if "flee" in 선택:
        출발()
    elif "eat" in 선택:
        죽음("Well that was tasty!")
    else:
        크툴루_방()
def 죽음(이유):
    print(이유, "Good job!")
    exit(0)
def 출발():
    print("You are in a dark room.")
    print("There is a door to your right and left.")
    print("Which one do you take?")
    선택 = input("> ")
    if 선택 == "left":
        곰_방()
    elif 선택 == "right":
        크툴루_방()
    else:
        죽음("You stumble around the room until you starve.")
출발()
|
[
"standbyme227@algorithmlabs.co.kr"
] |
standbyme227@algorithmlabs.co.kr
|
f813381d640daa2f7c3c0c699383e9bdd36a6e1e
|
34271ef9e02ea728cb713c933853b33dd815f286
|
/app.py
|
e63b0126f2e763193895573edd4beb50291debf5
|
[] |
no_license
|
AxlEstevez/api_rest_productos
|
38c8aa93922abad0eac0af7dd656c6a24dc37597
|
5a6d35aa0ea631f1550eac759f28d163b8058cda
|
refs/heads/master
| 2022-12-09T09:36:49.830053
| 2020-09-14T00:33:09
| 2020-09-14T00:33:09
| 295,224,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,341
|
py
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
# imports needed to run Python on the server side
from flask import Flask, render_template, redirect, request
import lxml  # used to validate XML documents against their XSD
import xmlschema  # another library for validating XML
# Modules for accessing the elements of the XML file.
import xml.etree.ElementTree as Et
# Our own helper methods for handling XML.
from xmlMod import *
# ---------------------------------------------------------
# The producto library is imported only to create objects
# of type Producto and handle the information more easily.
from producto import Producto
# ---------------------------------------------------------
# Helper methods specific to this project.
from modules import *
app = Flask(__name__)  # Flask object for handling data and routes on the server.
xml = 'static/xml/productos.xml'  # path to the XML file
xsd = 'static/xml/productos.xsd'  # path to the XSD file
xslt = 'static/xml/productos.xsl'  # path to the XSL file
indexAux = 'indexAux.html'  # HTML page where errors are displayed.
agrega = 'agrega.html'  # view for adding a new product.
# path to the product schema
# ---------------------------------------------------------
# This schema is used to validate that the information sent
# by the user follows the rules of the XML file that serves
# as the "database" for storing products.
# ---------------------------------------------------------
productoSchema = 'static/xml/producto.xsd'
productoXML = 'static/xml/producto.xml'
@app.route('/')
def index():
    # path to the HTML file where the transformation output goes
    html = 'templates/index.html'
    # check that the XML file is well formed
    if validateXML(xml, xsd):
        # check whether any products exist in the XML file
        if countElements(xml) > 0:
            # if at least one product exists, generate the
            # view of the XML file
            mkhtml(xslt, xml, html)
        else:
            # If no product exists, redirect to the start
            # page with a message "notifying" the user that
            # there are no products yet.
            return render_template(
                indexAux,
                message = "No products added yet"
            )
    else:
        # if the XML file is corrupted or does not validate
        # against its schema, notify the user that there was
        # a problem processing the page request
        return render_template(
            indexAux,
            message = "Error while processing the request."
        )
return render_template('index.html')
@app.route('/add')
def rutaAgregar():
return render_template('agrega.html')
@app.route('/add', methods=['POST'])
def agregaProducto():
if request.method == 'POST':
        # create a Producto object so we can generate the
        # XML for a product and validate it
producto = Producto(
request.form['nombre'],
request.form['precio'],
request.form['descripcion'],
request.form['marca'],
request.form['imagen'],
request.form['codigo']
)
        # create the product's XML file
        mkXML(producto)
        # validate that the XML matches what we
        # expect to receive
if validateXML(productoXML,productoSchema):
addAnaquel(productoXML,xml)
if validateXML(xml,xsd):
return redirect('/')
else:
return render_template(
agrega,
                message = 'Error processing the request'
)
else:
return render_template(agrega)
@app.route('/getProducto',methods=['POST'])
def getProducto():
xsltProducto = 'static/xml/producto.xsl'
xmlProducto = productoXML
htmlProduc = 'templates/producto.html'
if request.method == 'POST':
        # get the data of the product the user
        # requested
codigo = request.form['codigo']
producto = getInfo(codigo,xml)
if producto != None:
mkXML(producto)
mkhtml(xsltProducto,xmlProducto,htmlProduc)
return render_template('producto.html')
else:
return render_template(
indexAux,
                error = 'Error processing the request'
)
else:
return render_template(
'indexAux.html',
            error = 'Error processing the request'
)
@app.route('/delete', methods=['POST'])
def delete():
if request.method == 'POST':
        # remove the product node from the XML file
restAnaquel(productoXML,xml)
return redirect('/')
else:
return render_template(
indexAux,
            error = 'error processing the request'
)
@app.route('/update')
def update():
xsltUp = 'static/xml/productoUpdate.xsl'
xmlUp = 'static/xml/producto.xml'
htmlUp = 'templates/update.html'
mkhtml(xsltUp,xmlUp,htmlUp)
return render_template('update.html')
@app.route('/update', methods=['POST'])
def updateProducto():
if request.method == 'POST':
        # aux is, as its name suggests, an auxiliary variable
        # for the case where the user modifies the barcode;
        # it keeps a "record" used later to modify the
        # product on the shelf
aux = productoXML
restAnaquel(aux,xml)
        newProducto = Producto(
request.form['nombre'],
request.form['precio'],
request.form['descripcion'],
request.form['marca'],
request.form['imagen'],
request.form['codigo']
)
        mkXML(newProducto)
if validateXML(aux,productoSchema):
addAnaquel(aux,xml)
else:
return render_template(
indexAux,
                message = 'Error while processing the request.'
)
return redirect('/')
else:
return render_template(
indexAux,
            message = 'Error while processing the request.'
)
if __name__ == "__main__":
app.run(port=5000,debug=True)
|
[
"lacura4ever@gmail.com"
] |
lacura4ever@gmail.com
|
1b58373ac66d0ddb7dc9aeda27c62602f8569f74
|
de707c94c91f554d549e604737b72e6c86eb0755
|
/math/0x01-plotting/2-change_scale.py
|
480bf1b1af07c3d6f3b678dbbd1083ba7969a535
|
[] |
no_license
|
ejonakodra/holbertonschool-machine_learning-1
|
885cf89c1737573228071e4dc8e26304f393bc30
|
8834b201ca84937365e4dcc0fac978656cdf5293
|
refs/heads/main
| 2023-07-10T09:11:01.298863
| 2021-08-11T03:43:59
| 2021-08-11T03:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
#!/usr/bin/env python3
""" plots x, y as a line graph where y-axis is scaled logarithmically """
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 28651, 5730)
r = np.log(0.5)
t = 5730
y = np.exp((r / t) * x)
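# Equivalent closed form (a note added for clarity): with r = ln(1/2) and
# t = 5730 years (the C-14 half-life), y = exp((r/t)*x) = 0.5 ** (x / 5730).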
plt.plot(x, y)
plt.xlabel('Time (years)')
plt.ylabel('Fraction Remaining')
plt.title("Exponential Decay of C-14")
plt.yscale("log")
plt.xlim((0, 28650))
plt.show()
|
[
"eislek02@gmail.com"
] |
eislek02@gmail.com
|
5ddfb724efcc821a79c4e342fe9315c9e87c4d99
|
038e6e41d117431869edad4952a5b1463d5131bc
|
/donations/urls.py
|
f1cd42016e4516213c853992d4476f9cab832f42
|
[
"MIT"
] |
permissive
|
MikaelSantilio/aprepi-django
|
c49290855b7c83ecaf08de82ee9eedf8e8baa15a
|
5e2b5ecffb287eab929c0759ea35ab073cc19d96
|
refs/heads/master
| 2023-06-19T00:18:15.986920
| 2021-06-15T20:15:59
| 2021-06-15T20:15:59
| 329,428,268
| 0
| 1
|
MIT
| 2021-02-05T16:21:45
| 2021-01-13T20:50:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
from django.urls import path
from donations import views
app_name = "donations"
urlpatterns = [
path('', views.MakeDonation.as_view(), name='unique-donation'),
path('historico/', views.DonationListView.as_view(), name='list'),
# path('checkout/<str:value>', views.MPCheckout.as_view(), name='mp-checkout'),
# path('anonima/', views.MakeAnonymousDonation.as_view(), name='anonymous-donation'),
# path('recorrente/', views.MakeRecurringDonation.as_view(), name='recurring-donation'),
path('obrigado/', views.ThankYouView.as_view(), name='thankyou'),
# path('cartoes/', views.CreditCardListView.as_view(), name='list-cc'),
# path('cartoes/cadastrar', views.CreditCardCreateView.as_view(), name='create-cc'),
# path('cartoes/<int:pk>', views.CreditCardDetailView.as_view(), name='detail-cc'),
# path('cartoes/atualizar/<int:pk>',
# views.CreditCardUpdateView.as_view(), name='update-cc'),
# path('cartoes/apagar/<int:pk>',
# views.CreditCardDeleteView.as_view(), name='delete-cc')
]
|
[
"mikael.santilio@gmail.com"
] |
mikael.santilio@gmail.com
|
1d006bd3a0ab3787892cc88a2cdfe5d1becfdec4
|
3267642d2ef6311863b54c4196b064e8d01ab10a
|
/projects/migrations/0006_auto_20200821_0958.py
|
40af3a374d3e80e723f8e858fcded087d0ba1ffa
|
[] |
no_license
|
pracowniaPK/Portfolio
|
6ad425eaef957da195195073365d64f95ba6ad1a
|
cd862f80d60120a8ba55e281b678f1b9d5a90f10
|
refs/heads/master
| 2022-12-07T00:15:15.724251
| 2020-09-02T21:01:13
| 2020-09-02T21:01:13
| 285,631,529
| 0
| 0
| null | 2020-08-13T22:47:29
| 2020-08-06T17:30:18
|
Python
|
UTF-8
|
Python
| false
| false
| 337
|
py
|
# Generated by Django 3.1 on 2020-08-21 09:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0005_auto_20200820_1854'),
]
operations = [
migrations.AlterModelOptions(
name='project', options={'ordering': ['priority']},
),
]
|
[
"kaczory5@gmail.com"
] |
kaczory5@gmail.com
|
44a673b2bafd755b74bd2d6410ccb5e39e996f78
|
ea79ea47e8d499eb222e07c2d830a3af969bb98d
|
/conf.py
|
15fbcc3270727f7578ebb349b9a7a48bd7bf7435
|
[] |
no_license
|
kabaj/symfony-docs-pl
|
3bac5e98adff90997ce8501f77ae0fa84fb43cd1
|
f781901ba37bce5d97eb6f28a1875e3a8c30c59f
|
refs/heads/master
| 2021-01-16T22:50:05.730297
| 2016-02-10T21:24:41
| 2016-02-10T21:24:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,434
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# symfony-docs-pl documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 30 21:52:40 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('_exts'))
sys.path.append(os.path.abspath('_theme/_exts'))
# adding PhpLexer
from sphinx.highlighting import lexers
from pygments.lexers.compiled import CLexer
from pygments.lexers.special import TextLexer
from pygments.lexers.text import RstLexer
from pygments.lexers.web import PhpLexer
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sensio.sphinx.refinclude', 'sensio.sphinx.configurationblock', 'sensio.sphinx.phpcode',
'sensio.sphinx.bestpractice', 'symfonycom.sphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_theme/_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'symfony-docs-pl'
copyright = u'2013-2016, Społeczność Symfony'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '2'
# The full version, including alpha/beta/rc tags.
# release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'pl'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_theme']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# enable highlighting for PHP code not between ``<?php ... ?>`` by default
lexers['markdown'] = TextLexer()
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
lexers['php-standalone'] = PhpLexer(startinline=True)
lexers['php-symfony'] = PhpLexer(startinline=True)
lexers['rst'] = RstLexer()
lexers['varnish3'] = CLexer()
lexers['varnish4'] = CLexer()
config_block = {
'markdown': 'Markdown',
'rst': 'reStructuredText',
'varnish3': 'Varnish 3',
'varnish4': 'Varnish 4'
}
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# use PHP as the primary domain
primary_domain = 'php'
# set url for API links
api_url = 'http://api.symfony.com/master/%s'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SymfonyDocPL'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'symfony-docs-pl.tex', u'Polska dokumentacja Symfony',
u'Społeczność Symfony', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'symfony-docs-pl', u'Polska dokumentacja Symfony',
[u'Społeczność Symfony'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'symfony-docs-pl', u'Polska dokumentacja Symfony',
u'Społeczność Symfony', 'symfony-docs-pl', u'Polski przekład oficjalnej dokumentacji Symfony',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'symfony-docs-pl'
epub_author = u'Społeczność Symfony'
epub_publisher = u'Społeczność Symfony'
epub_copyright = u'2013-2016, Społeczność Symfony'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Use PHP syntax highlighting in code examples by default
highlight_language='php'
|
[
"andrzej@abutronik.pl"
] |
andrzej@abutronik.pl
|
629ff9ed2b366ac2773776f14140f0935e748f54
|
052f4e1705141b84c93218658bc38773cd40088a
|
/scripts/python/dem_processing/dem_perf_check.py
|
baac67fd21f301f847a1913666767ad256087f28
|
[] |
no_license
|
bipinmsit/mycode
|
5417f50cef5066990b34d9a7fa65066b5e6bcda1
|
f392d25b0825bee050a8f0f84a990cac83bcac6e
|
refs/heads/master
| 2022-11-21T08:49:04.932399
| 2020-07-18T17:28:25
| 2020-07-18T17:28:25
| 105,014,101
| 2
| 0
| null | 2017-12-07T07:31:41
| 2017-09-27T12:15:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,036
|
py
|
from raster_processing.raster_chunks import GeoChunks as gc
import numpy as np
from subprocess import call
import matplotlib.pyplot as plt
import gdal
import sys
def match_shape_DEM(referenceDEM,terrainDEM,tempDEM="./temp_1234.tif"):
"""
Ensures that shape of reference DEM and the new terrain DEM are the same.
This is done by:
1. Ensuring that the GSD/Resolution/PixelSize match, in both
2. Ensure that the extent of both images are the size
"""
ref_dem = gc(referenceDEM)
new_dem = gc(terrainDEM)
try:
assert(ref_dem.data_bands[0].XSize == new_dem.data_bands[0].XSize)
assert(ref_dem.data_bands[0].YSize == new_dem.data_bands[0].YSize)
    except AssertionError:
"""
Assumptions:-
1. TerrainDEM already has the same projection system as the original DEM
To-Do:-
1. Get extent of reference Image
2. Get projection of reference Image (..required?)
3. Use extent, projection, and pixel size of original image with GDAl to
force creation of new file with same size as reference DEM
"""
#Geographic Extents in X direction
x_min = str(ref_dem.geo_extent_x[0])
x_max = str(ref_dem.geo_extent_x[1])
#Geographic Extents in Y direction
y_min = str(ref_dem.geo_extent_y[0])
y_max = str(ref_dem.geo_extent_y[1])
#Pixel Resolution along both axes
x_size = str(ref_dem.geo_trans[1])
y_size = str(ref_dem.geo_trans[5])
ref_dem = None
new_dem = None
#Code to change size of terrainDEM to size of reference DEM
call(["gdalwarp", "-te", x_min, y_min, x_max, y_max, "-tr",
x_size, y_size, "-overwrite", terrainDEM, tempDEM])
call(["mv", tempDEM, terrainDEM])
pass
pass
def create_delta_DEM(referenceDEM, terrainDEM, deltaDEM="./delta.tif"):
"""
For each co-ordinate in referenceDEM and terrainDEM,
create a grid where value of each co-ordinate is
delta[x, y] = referenceDEM[x, y] - terrainDEM[x, y]
referenceDEM = "/path/to/referenceDEM/dem"
terrainDEM = "/path/to/interpolated/dem"
"""
ref_dem = gc(referenceDEM)
new_dem = gc(terrainDEM)
delta_dem = gc.create_from(ref_dem, deltaDEM)
#Ensuring that the size of the two rasters are the same
try:
assert(ref_dem.data_bands[0].XSize == new_dem.data_bands[0].XSize)
assert(ref_dem.data_bands[0].YSize == new_dem.data_bands[0].YSize)
    except AssertionError:
        print('Error: sizes of input DEMs do not match.', file=sys.stderr)
return 2
if ref_dem.data_bands[0].XSize > gc.x_chunk_size:
for ch in ref_dem.break_chunks():
ref_arr = ref_dem.read(chunk=ch)
ref_mask = np.where(ref_arr == ref_dem.no_data_value)
ref_arr[ref_mask] = 0
new_arr = new_dem.read(chunk=ch)
new_arr[ref_mask] = 0
delta_arr = ref_arr - new_arr
delta_dem.write(delta_arr,chunk=ch)
else:
ref_arr = ref_dem.read()
ref_mask = np.where(ref_arr == ref_dem.no_data_value)
ref_arr[ref_mask] = 0
new_arr = new_dem.read()
new_arr[ref_mask] = 0
delta_arr = ref_arr - new_arr
delta_dem.write(delta_arr)
delta_dem = None
return 0
pass
def apply_mask_poly(baseDEM, maskPoly,maskDEM):
"""
Apply mask to base to get maskDEM as output.
Values that are masked out will be set to No data value of the baseDEM.
"""
ref_dem = gc(baseDEM)
base_geo_trans = ref_dem.geo_trans
#Geographic Extents in X direction
x_min = str(ref_dem.geo_extent_x[0])
x_max = str(ref_dem.geo_extent_x[1])
#Geographic Extents in Y direction
y_min = str(ref_dem.geo_extent_y[0])
y_max = str(ref_dem.geo_extent_y[1])
xsize = str(base_geo_trans[1])
ysize = str(base_geo_trans[5])
call(["gdalwarp", "-ot", "Float32", "-of", "GTiff", "-cutline", maskPoly,
"-te", x_min, y_min, x_max, y_max,"-overwrite","-crop_to_cutline",
"-tr", xsize, ysize, baseDEM, maskDEM])
print("Mask Operation Completed.\nOutput file present at " + maskDEM)
pass
def DEM_histogram(histDEM, default=False, hist_min = -50, hist_max = 50, hist_nbins=1000):
"""
gives me a dictionary of value, freq pairs
for each distinct value in the deltaDEM which
is not removedValue
"""
hist_gtif = gc(histDEM)
if default is False:
hist_tuple = hist_gtif.data_bands[0].GetHistogram(buckets=hist_nbins,min=hist_min,max=hist_max)
hist_arr = np.array(hist_tuple)
hist_keys = np.linspace(hist_min, hist_max, hist_nbins)
hist_dict = dict(zip(hist_keys, hist_arr))
plt.bar(hist_keys, hist_arr, align='center')
plt.show()
return hist_dict
else:
hist_tuple = hist_gtif.data_bands[0].GetDefaultHistogram()
hist_min = hist_tuple[0]
hist_max = hist_tuple[1]
hist_nbins = hist_tuple[2]
hist_arr = np.array(hist_tuple[3])
hist_keys = np.linspace(hist_min, hist_max, hist_nbins)
hist_dict = dict(zip(hist_keys, hist_arr))
plt.bar(hist_keys, hist_arr, align='center')
plt.show()
return hist_dict
def performance(hist, minVal, meanTreeHeight, k, maxVal):
"""
True/False indicating whether hist is within bounds
"""
if min(hist.keys()) < minVal:
return False
if max(hist.keys()) > maxVal:
return False
pass
"""
variance calc
In [46]: for ch in chnks:
...: arr = []
...: for dem in dem_arr:
...: d_arr = dem.read(chunk=ch)
...: d_arr[np.where(d_arr == dem.no_data_value)] = 0
...: arr.append(d_arr)
...: stacked = np.dstack(tuple(arr))
...: var_arr = np.var(stacked,axis=2)
...: var_arr[np.where(var_arr == 0)] = var_out.no_data_value
...: var_out.write(var_arr,chunk=ch)
...:
"""
|
[
"bipin.kumar@aspecscire.com"
] |
bipin.kumar@aspecscire.com
|
f37bf6d7e69087bad285023e03ef4723bb8ba36b
|
397e125e94f4f139f2bf5055824d81f24b8b1757
|
/ABC/145/B.py
|
c96cb419eb5372858a0ff144fa21e734a83f0e9e
|
[] |
no_license
|
tails1434/Atcoder
|
ecbab6ee238e3f225551297db961b1b502841fa4
|
e7c7fed36be46bbaaf020a70997842240ba98d62
|
refs/heads/master
| 2021-07-07T00:31:49.235625
| 2020-09-30T01:42:01
| 2020-09-30T01:42:01
| 189,009,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
def main():
N = int(input())
S = input()
if N % 2 != 0:
print('No')
else:
A = S[:N//2]
B = S[N//2:N]
if A == B:
print('Yes')
else:
print('No')
if __name__ == "__main__":
main()
|
[
"sososo1333@gmail.com"
] |
sososo1333@gmail.com
|
1c3fe0cc2873b5858a438ae7dbeaf43f1ace5c25
|
8c6466e12bb3351031c25677127dc86d13bd9b19
|
/Project data Modelling with Postgress/sql_queries.py
|
09f272907dc0ece9579ce135decdb08810006f0f
|
[] |
no_license
|
andreodendaal/udacity_data_engineering
|
ac8eb889db002014b3ccf1fe15b16f77361b8d55
|
03524ffbd1830d168761fcc996cab329dd064977
|
refs/heads/master
| 2020-05-05T09:24:02.921194
| 2020-01-15T20:22:14
| 2020-01-15T20:22:14
| 179,902,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,481
|
py
|
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
songplay_table_create = ("""CREATE TABLE IF NOT EXISTS songplays (songplay_id varchar, start_time timestamp, user_id varchar, level varchar, song_id varchar, artist_id varchar, session_id varchar, location varchar, user_agent varchar, PRIMARY KEY (songplay_id));""")
user_table_create = ("""CREATE TABLE IF NOT EXISTS users (user_id varchar, first_name varchar, last_name varchar, gender varchar, level varchar, PRIMARY KEY (user_id));
""")
song_table_create = ("""CREATE TABLE IF NOT EXISTS songs (song_id varchar, title varchar, artist_id varchar, year int, duration int, PRIMARY KEY (song_id));
""")
#song_id title artist_id year duration
artist_table_create = ("""CREATE TABLE IF NOT EXISTS artists (artist_id varchar, name varchar, location varchar, lattitude float, longitude float, PRIMARY KEY (artist_id));
""")
# https://www.postgresql.org/docs/9.1/functions-datetime.html
time_table_create = ("""CREATE TABLE IF NOT EXISTS time (start_time timestamp, hour int, day int, week int, month int, year int, weekday int, PRIMARY KEY (start_time));
""")
# INSERT RECORDS
songplay_table_insert = ("""INSERT INTO songplays (songplay_id, start_time, user_id, level, song_id, artist_id, session_id, location, user_agent) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s);""")
user_table_insert = ("""INSERT INTO users (user_id, first_name, last_name, gender, level) VALUES(%s, %s, %s, %s, %s ) ON CONFLICT (user_id) DO NOTHING;""")
song_table_insert = ("""INSERT INTO songs (song_id, title, artist_id, year, duration) VALUES(%s, %s, %s, %s, %s);""")
artist_table_insert = ("""INSERT INTO artists (artist_id, name, location, lattitude, longitude) VALUES(%s, %s, %s, %s, %s);""")
time_table_insert = ("""INSERT INTO time (start_time, hour, day, week, month, year, weekday) VALUES(%s, %s, %s, %s, %s, %s, %s);""")
# FIND SONGS
song_select = ("""
""")
# QUERY LISTS
#create_table_queries = [songplay_table_create]
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
#drop_table_queries = [songplay_table_drop]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
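# A minimal usage sketch (not part of the original file; connection parameters
# are placeholders). These strings are meant for a DB-API driver such as
# psycopg2, kept commented out so importing this module stays side-effect free:
#
#   import psycopg2
#   conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=student")
#   cur = conn.cursor()
#   for query in drop_table_queries + create_table_queries:
#       cur.execute(query)
#   conn.commit()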
|
[
"aodendaal.direct@gmail.com"
] |
aodendaal.direct@gmail.com
|
17886f2c49f51a24b121c87812d0111097c21985
|
f47755f746c316cfdac8afaefe6d149aa77bc4e5
|
/cloudmesh/rest/shell/shell.py
|
052d890a4c3d2c4cb6cea1c4671b1c59ab97593b
|
[
"Apache-2.0"
] |
permissive
|
karthik-anba/rest
|
a3705b9349ebf7e6e1b4f21036301529adfd3dc8
|
b575a5bc16a5352f87c107fadb435935d5b66746
|
refs/heads/master
| 2021-01-21T10:46:23.433663
| 2017-02-23T01:20:23
| 2017-02-23T01:20:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,358
|
py
|
#
# In our REST architecture we want to interface with the backend systems while
# using a secure REST service.
# Internally we will use the many functions that cloudmesh_client provides.
# Before we use them we need to implement some elementary functions;
# let's first do administrative functions in an admin command.
# pseudo code: task implement
from __future__ import print_function
import importlib
import pkgutil
import pydoc
import sys
import textwrap
from cmd import Cmd
from cloudmesh_client.shell.command import PluginCommand
from cloudmesh_client.shell.command import command
import cloudmesh
from cloudmesh.rest.server.mongo import Mongo
import inspect
from cloudmesh_client.common.dotdict import dotdict
def print_list(elements):
for name in elements:
print("*", name)
class plugin(object):
@classmethod
def modules(cls):
module_list = []
package = cloudmesh
for importer, modname, ispkg in pkgutil.walk_packages(path=package.__path__,
prefix=package.__name__ + '.',
onerror=lambda x: None):
module_list.append(modname)
return module_list
@classmethod
def classes(cls):
module_list = cls.modules()
commands = []
for module in module_list:
if module.startswith('cloudmesh.ext.command.'):
commands.append(module)
return commands
@classmethod
def name(cls, command):
command_name = "do_" + command
class_name = "cloudmesh.ext.command." + command + "." \
+ command.capitalize() + "Command"
return class_name, command_name
@classmethod
def class_name(cls, command):
return "cloudmesh.ext.command." + command + "." \
+ command.capitalize() + "Command"
@classmethod
def load(cls, commands=None):
"""
:param commands: If None the commands will be found from import cloudmesh
Otherwise the commands can be explicitly specified with
commands = [
'cloudmesh.ext.command.bar.BarCommand',
'cloudmesh.ext.command.foo.FooCommand',
]
                 A namespace package must exist. Foo and Bar are just examples
:return: the classes of the command
"""
if commands is None:
commands = [c.split('.')[-1] for c in cls.classes()]
# print_list(commands)
COMMANDS = [cls.class_name(c) for c in commands]
commands = [getattr(importlib.import_module(mod), cls) for (mod, cls) in
(commands.rsplit(".", 1) for commands in COMMANDS)]
return commands
plugin.load()
PluginCommandClasses = type(
'CommandProxyClass',
tuple(PluginCommand.__subclasses__()),
{})
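# Note (added): the three-argument form type(name, bases, dict) creates a class
# dynamically; here the proxy inherits from every loaded PluginCommand subclass
# so CMShell picks up all of their do_* command methods through a single base.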
class CMShell(Cmd, PluginCommandClasses):
prompt = 'cms> '
banner = textwrap.dedent("""
+=======================================================+
. ____ _ _ _ .
. / ___| | ___ _ _ __| |_ __ ___ ___ ___| |__ .
. | | | |/ _ \| | | |/ _` | '_ ` _ \ / _ \/ __| '_ \ .
. | |___| | (_) | |_| | (_| | | | | | | __/\__ \ | | | .
. \____|_|\___/ \__,_|\__,_|_| |_| |_|\___||___/_| |_| .
+=======================================================+
Cloudmesh Rest Shell
""")
#
# List all commands that start with do
#
@command
def do_help(self, args, arguments):
"""
::
Usage:
help
Description:
help - List of all registered commands
"""
print("Help")
print("====")
methodList = [n for n, v in inspect.getmembers(self, inspect.ismethod)]
functionList = [n for n, v in inspect.getmembers(self, inspect.isfunction)]
commands = methodList + functionList
for c in sorted(commands):
if c.startswith("do_"):
print(c.replace("do_", ""), end=' ')
print ()
return ""
@command
def do_info(self, args, arguments):
"""
::
Usage:
info [commands|package|help]
Description:
info
provides internal info about the shell and its packages
"""
arguments = dotdict(arguments)
module_list = plugin.modules()
if arguments.commands:
commands = plugin.classes()
print_list(commands)
elif arguments.help:
for name in module_list:
p = "cloudmesh." + name
strhelp = p + " not found."
try:
strhelp = pydoc.render_doc(p, "Help on %s" + "\n" + 79 * "=")
                except Exception:
pass
print(strhelp)
else:
print_list(module_list)
@command
def do_admin(self, args, arguments):
"""
::
Usage:
admin [db|rest] start
admin [db|rest] stop
admin db backup
admin db reset
admin status
Description:
db start
starts the database service
db stop
stops the database service
db backup
                creates a backup of the database
db reset
resets the database
Arguments:
FILE a file name
Options:
-f specify the file
"""
arguments = dotdict(arguments)
print(arguments)
if arguments.db and arguments.stop:
print("PLEASE stop db")
m = Mongo()
m.stop()
elif arguments.db and arguments.start:
print("PLEASE start db")
m = Mongo()
m.start()
elif arguments.rest and arguments.start:
print("PLEASE start rest")
# m = Eve()
# m.start()
elif arguments.rest and arguments.stop:
print("PLEASE stop rest")
# m = Eve()
# m.stop()
elif arguments.start:
m = Mongo()
r = m.start()
print(r)
# start mong, start eve
pass
elif arguments.stop:
m = Mongo()
r = m.stop()
print(r)
# stop eve
pass
elif arguments.status:
m = Mongo()
r = m.status()
print(r)
def preloop(self):
"""adds the banner to the preloop"""
lines = textwrap.dedent(self.banner).split("\n")
for line in lines:
# Console.cprint("BLUE", "", line)
print(line)
# noinspection PyUnusedLocal
def do_EOF(self, args):
"""
::
Usage:
EOF
Description:
Command to the shell to terminate reading a script.
"""
return True
# noinspection PyUnusedLocal
def do_quit(self, args):
"""
::
Usage:
quit
Description:
            Action to be performed when quit is typed
"""
return True
do_q = do_quit
def emptyline(self):
return
#def main():
# CMShell().cmdloop()
def inheritors(klass):
subclasses = set()
work = [klass]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses.add(child)
work.append(child)
return subclasses
def do_gregor(line):
print("gregor")
# noinspection PyBroadException
def main():
"""cms.
Usage:
cms --help
cms [--echo] [--debug] [--nosplash] [-i] [COMMAND ...]
Arguments:
COMMAND A command to be executed
Options:
--file=SCRIPT -f SCRIPT Executes the script
-i After start keep the shell interactive,
otherwise quit [default: False]
--nosplash do not show the banner [default: False]
"""
def manual():
print(main.__doc__)
args = sys.argv[1:]
arguments = {
'--echo': '--echo' in args,
'--help': '--help' in args,
'--debug': '--debug' in args,
'--nosplash': '--nosplash' in args,
'-i': '-i' in args}
echo = arguments["--echo"]
if arguments['--help']:
manual()
sys.exit()
for a in args:
if a in arguments:
args.remove(a)
arguments['COMMAND'] = [' '.join(args)]
commands = arguments["COMMAND"]
if len(commands) > 0:
if ".cm" in commands[0]:
arguments["SCRIPT"] = commands[0]
commands = commands[1:]
else:
arguments["SCRIPT"] = None
arguments["COMMAND"] = ' '.join(commands)
if arguments["COMMAND"] == '':
arguments["COMMAND"] = None
# noinspection PySimplifyBooleanCheck
if arguments['COMMAND'] == []:
arguments['COMMAND'] = None
splash = not arguments['--nosplash']
debug = arguments['--debug']
interactive = arguments['-i']
script = arguments["SCRIPT"]
command = arguments["COMMAND"]
#context = CloudmeshContext(
# interactive=interactive,
# debug=debug,
# echo=echo,
# splash=splash)
cmd = CMShell()
# if script is not None:
# cmd.do_exec(script)
try:
if echo:
print(cmd.prompt, command)
if command is not None:
cmd.precmd(command)
stop = cmd.onecmd(command)
cmd.postcmd(stop, command)
except Exception as e:
print("ERROR: executing command '{0}'".format(command))
print(70 * "=")
print(e)
print(70 * "=")
if interactive or (command is None and script is None):
cmd.cmdloop()
if __name__ == '__main__':
main()
|
[
"laszewski@gmail.com"
] |
laszewski@gmail.com
|
9900fb23966c7170f49463766fb9144b67096696
|
6323bd983f6304d95e62909bfc4883d2f9ef1a14
|
/Leetcode/Medium/Range Sum query.py
|
4e7d5a0537c1ad2d9022d5981e76015b68d98328
|
[] |
no_license
|
akshay-sahu-dev/PySolutions
|
4c2d67d5f66fe83a6e302e1742a5bf17dafe2b99
|
83552962805768914034a284bf39197f52ca5017
|
refs/heads/master
| 2023-06-17T06:36:50.252943
| 2021-07-09T17:28:53
| 2021-07-09T17:28:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
## https://leetcode.com/problems/range-sum-query-immutable
from typing import List

class NumArray:
def __init__(self, nums: List[int]):
self.nums = nums
def sumRange(self, i: int, j: int) -> int:
return sum(self.nums[i:j+1])
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
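# A common O(1)-per-query variant (a sketch added for comparison, not the
# original submission): precompute prefix sums so sumRange avoids re-summing
# the slice on every call.
#
# class NumArray:
#     def __init__(self, nums: List[int]):
#         self.prefix = [0]
#         for n in nums:
#             self.prefix.append(self.prefix[-1] + n)
#     def sumRange(self, i: int, j: int) -> int:
#         return self.prefix[j + 1] - self.prefix[i]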
|
[
"akki5233@gmail.com"
] |
akki5233@gmail.com
|
40b601ccb51ae4cd83ee67ed6b070c74ef5e537f
|
cae96d56eb65a4b6969adf25bafeab75c25cb330
|
/kbtuwebBack/hhBack/api/views.py
|
a8c92f0978d9d788c3e69cd64d661808316d7525
|
[] |
no_license
|
justdiasik/Project
|
ae5ff68c957db6a38709038d90768113d447e8ff
|
7f9570a6a1e7622574efd8ebbc262a032271655c
|
refs/heads/main
| 2023-04-30T02:18:01.229148
| 2021-05-03T15:28:27
| 2021-05-03T15:28:27
| 352,089,381
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,003
|
py
|
from django.shortcuts import render
from django.http.request import HttpRequest
from .models import Company,Vacancy
from django.http.response import JsonResponse
# Create your views here.
def company_list(request):
if request.method == 'GET':
companies = Company.objects.all()
companies_json = [company.to_json() for company in companies]
return JsonResponse(companies_json, safe=False)
elif request.method == 'POST':
pass
def company_detail(request, company_id):
try:
company = Company.objects.get(id=company_id)
company_json = company.to_json()
except Company.DoesNotExist as e:
return JsonResponse({'error': str(e)})
return JsonResponse(company_json, safe=False)
def vacancies_by_companyId(request, company_id):
try:
test_for_existOfCompany = Company.objects.get(id=company_id)
vacancies_by_company = Vacancy.objects.filter(company=company_id)
vacancies_json = [vacancy.to_json() for vacancy in vacancies_by_company]
except Company.DoesNotExist as e:
return JsonResponse({'error': str(e)})
return JsonResponse(vacancies_json, safe=False)
def vacancy_list(request):
if request.method == 'GET':
# vacancies = Vacancy.objects.filter(salary__in=[1500, 1800])
vacancies = Vacancy.objects.all()
vacancies_json = [vacancy.to_json() for vacancy in vacancies]
return JsonResponse(vacancies_json, safe=False)
elif request.method == 'POST':
pass
def vacancy_detail(request, vacancy_id):
try:
vacancy = Vacancy.objects.get(id=vacancy_id)
vacancy_json = vacancy.to_json()
except Vacancy.DoesNotExist as e:
return JsonResponse({'error': str(e)})
return JsonResponse(vacancy_json, safe=False)
def top_ten_vacancies(requests):
top_vacancies = Vacancy.objects.order_by('-salary')[:10]
top_json = [top.to_json() for top in top_vacancies]
return JsonResponse(top_json, safe=False)
|
[
"diasik_jj@mail.ru"
] |
diasik_jj@mail.ru
|
3ca70e7899cae6e8da6ff35dcef69212de849360
|
b141fa3df722bcf2acf4ca6734633f4c571765e8
|
/Cycle detection.py
|
4fb36a914ae00f0834bc4d87bd980deb94e06077
|
[] |
no_license
|
lilmoonkk/Graph-algorithm---Cycle-Detection
|
dd6a4997cceacecf6f927a6c078ccbf820c2431a
|
565df01ba6fd67b83ae1ae76c6753297103586e6
|
refs/heads/main
| 2023-06-09T02:40:55.616825
| 2021-07-01T12:27:27
| 2021-07-01T12:27:27
| 382,024,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,662
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 30 15:49:38 2021
@author: Moon
"""
import random
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
distance_storage = np.array([[0,3235,1883,1012,19116],[3235,0,3198,2397,15881],
[1883,3198,0,1351,17671],[1012,2397,1351,0,18152],
[19116,15881,17671,18152,0]])
def GetKey(dictionary, value):
for key, item in dictionary.items():
if item == value:
return key
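# usage note (added): GetKey({0: "MA", 1: "YE"}, "YE") returns 1, a reverse
# lookup of a dictionary key by its value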
class Graph:
def __init__(self):
#initial graph
self.graph = np.array([[0,3235,0,0,0],[0,0,3198,0,0],
[0,0,0,1351,0],[0,0,0,0,0],[19116,0,0,18152,0]])
        # MA: Marseille, France; YE: Yerevan, Armenia; OS: Oslo, Norway;
        # VI: Vienna, Austria; WE: Wellington, New Zealand
self.city = {0:"MA", 1:"YE", 2:"OS", 3:"VI", 4:"WE"}
def DisplayGraph(self, title):
rows, cols = np.where(self.graph != 0)
weights=[]
for i in range(len(rows)):
weights.append(self.graph[rows[i]][cols[i]])
edges = zip(rows.tolist(),cols.tolist(),weights)
g = nx.DiGraph()
g.add_weighted_edges_from(edges)
#Create positions of all nodes and save them
pos = nx.circular_layout(g)
#Draw the graph according to node positions
nx.draw(g, pos, node_size=1000, labels=self.city, with_labels=True,
connectionstyle='arc3, rad = 0.1',arrows=True)
#Draw edge labels according to node positions
nx.draw_networkx_edge_labels(g, pos, edge_labels=nx.get_edge_attributes(g,'weight'))
plt.title(title)
        plt.legend(['MA: Marseille, France','YE: Yerevan, Armenia','OS: Oslo, Norway',
                    'VI: Vienna, Austria', 'WE: Wellington, New Zealand'],
                   loc = 'lower right', fontsize = 6.5)
plt.show()
def RemoveEdge(self, sc, dc):
        # sc = source city, dc = destination city
sc_index = GetKey(self.city, sc)
dc_index = GetKey(self.city, dc)
if self.graph[sc_index][dc_index] == 0 :
print("The edge does not exist")
else:
self.graph[sc_index][dc_index] = 0
def AddEdge(self):
        # sc = source city, dc = destination city
        # randomly generate sc and dc
valid = False
while valid == False:
sc_index = random.choice(list(self.city.keys()))
dc_index = sc_index #to start while loop
#make sure sc != dc
while (dc_index == sc_index):
dc_index = random.choice(list(self.city.keys()))
if self.graph[sc_index][dc_index] == 0 :
valid = True
self.graph[sc_index][dc_index] = distance_storage[sc_index][dc_index]
# ==============Strongly Connected by Tan Hooi Ting===============================================
#Function used by isSC() to perform the DFS
def DFS(self, v, visited):
#Mark the source vertex, v as visited
visited[v] = True
#Recur for all the unvisited vertices adjacent to the source vertex
for i in range(5):
if visited[i]==False:
if self.graph[v][i]>0:
self.DFS(i, visited)
#Function that return the transpose of the current graph
def getTranspose(self):
#initialize a new graph
transposeG = Graph()
#Transpose the new graph
for i in range(5):
for j in range(5):
transposeG.graph[i][j] = self.graph[j][i]
return transposeG
#Function that return true if the graph is strongly connected
def isSC(self):
#set all vertices as not visited
visited = [False]*5
#Start DFS traversal from the first vertex
self.DFS(0, visited)
for i in range(5):
if visited[i] == False:
return False
#If the current graph is strongly connected, start traverse the transpose graph
g = self.getTranspose()
visited = [False]*5
g.DFS(0,visited)
for i in range(5):
if visited[i] == False:
return False
return True
#Function to generate strongly connected graph
def StronglyConnected(self):
SC = self.isSC()
while SC == False:
self.AddEdge()
SC = self.isSC()
if SC == True:
self.DisplayGraph("Strongly Connected Graph")
# ==============DETECT CYCLE by Koo Xi Moon===================================
def CheckCycle(self):
visited = [False] * 5
stack = [False] * 5
edges = []
for city in self.city:
if visited[city] == False:
if self.DFSFindCycle(city, visited, stack, edges):
self.DisplayCycle(edges)
return True
return False
# Depth First Search
def DFSFindCycle(self, city, visited, stack, edges):
visited[city] = True
stack[city] = True
#to find adjacent nodes
neighbours = []
items = self.graph[city,:]
for i in range(5):
if items[i] != 0:
neighbours.append(i)
#to reach adjacent node
for neighbour in neighbours:
if visited[neighbour] == False:
if self.DFSFindCycle(neighbour, visited, stack, edges):
edges.append([city,neighbour])
return True
#determinant
#found the node if the adjacent node is already in stack
elif stack[neighbour] == True:
edges.append([city,neighbour])
return True
stack[city] = False
return False
def DisplayCycle(self, edges):
#reverse edges
edges = edges[::-1]
#cut out edges that are not in the cycle
intercept_node = edges[-1][-1]
for i in range(len(edges)):
if edges[i][0] == intercept_node:
edges = edges[i:]
break
#flatten edges
nodes = sum(edges, [])
city = {0:"MA", 1:"YE", 2:"OS", 3:"VI", 4:"WE"}
#filter out city nodes that are not involved
for place in self.city:
if place not in nodes:
city.pop(place)
#start plotting graph
g = nx.DiGraph()
g.add_edges_from(edges)
#Create positions of all nodes and save them
pos = nx.circular_layout(g)
#Draw the graph according to node positions
nx.draw(g, pos, node_size=1000, labels=city, with_labels=True,
connectionstyle='arc3, rad = 0.1',arrows=True)
#Draw edge labels according to node positions
plt.title("Detected Cycle")
plt.show()
# Depth First Search
def DFSFindCycleWithCertainNode(self, city, visited, edges, node):
visited[city] = True
#to find adjacent nodes
neighbours = []
items = self.graph[city,:]
for i in range(5):
if items[i] != 0:
neighbours.append(i)
#to reach adjacent node
for neighbour in neighbours:
if visited[neighbour] == False:
if self.DFSFindCycleWithCertainNode(neighbour, visited, edges, node):
edges.append([city,neighbour])
return True
# determinant
# found the cycle if the adjacent node is the chosen node
elif neighbour == node:
edges.append([city,neighbour])
return True
return False
def GetCycleWithAppointedNode(self, node):
visited = [False] * 5
edges = []
node = GetKey(self.city, node)
while(self.DFSFindCycleWithCertainNode(node, visited, edges, node)==False):
self.AddEdge()
self.DisplayGraph("Current Graph")
print("The current graph has no detected cycle with the node you are looking for.")
print("No worries! We have added an edge for you.")
input("Press any key to continue.\n")
visited = [False] * 5
edges = []
self.DisplayCycle(edges)
# ==============SHORTEST PATH by Lee Hui Ying==================================
# Function used for BFS to check if there is a path between 2 vertices
def check_reachable(self,sc,dc):
        # sc = source city, dc = destination city
sc_index = GetKey(self.city, sc)
dc_index = GetKey(self.city, dc)
# Mark all the vertices is not visited yet
visited = [False] * 5
# Initialize a queue for BFS to store vertices which are not visited yet
queue = []
# Enqueue the source vertex into the queue and mark
# it as visited
queue.append(sc_index)
visited[sc_index] = True
# Keep checking if there is a vertex that has adjacent vertex which is
# not visited yet
while queue:
# Dequeue a vertex 'visit' from the queue
visit = queue.pop(0)
# Check if the vertex 'visit' is the destination vertex, return true
if visit == dc_index:
return True
# Else, do BFS
# For all vertices adjacent to vertex 'visit' and the vertex is
# not visited yet, enqueue the vertex into the queue and mark it
            # as visited
for i in range(5):
if (self.graph[visit][i] != 0 and
(not visited[i])):
queue.append(i)
visited[i] = True
# If finish doing BFS, but the destination vertex is not visited, return false
return False
# Function used to compute the shortest path from the source to destination
# by using Dijkstra's algorithm
def ShortestPath(self,sc,dc):
row = len(self.graph)
col = len(self.graph[0])
# Initialize all distances as INFINITY
D = [float("Inf")] * row
# Initialize an array to store the shortest tree
Shortest_tree = [-1]*row
        # Get the index of the source city and destination city in the city dictionary
sc_index = GetKey(self.city, sc)
dc_index = GetKey(self.city, dc)
# Assign the distance from source city to source city as 0
D[sc_index] = 0
# Create a priority queue and store all the vertices of the graph
priority_q=[]
for i in range(row):
priority_q.append(i)
# While the priority queue is not empty, find the shortest path for all vertices
while priority_q:
# Check the index of vertex with minimum distance from the priority queue
u = self.minKey(D,priority_q)
# Remove the vertex with minimum distance from the priority queue
priority_q.remove(u)
# Check if u is the index of destination then print the shortest path
# and shortest distance, then return
if u == dc_index:
print("\n Shortest distance = ", self.printPath(Shortest_tree, u))
return 0
            # Check all adjacent vertices of the dequeued vertex with index, u
# If an adjacent vertex has not been visited and still in the priority queue,
# then get the weighted edge and compare the distance value in D[]
# If the sum of distance of vertex with index,u is smaller than D[i],
# Update the D[] value and the Shortest_tree[]
for i in range(col):
if(self.graph[u][i] != 0 and i in priority_q):
w = self.graph[u][i]
if((D[u] + w) < D[i]):
D[i] = D[u] + w
Shortest_tree[i]=u
# Function used to return the index with minimum distance in the priority queue used
# in the function ShortestPath
def minKey(self,D,priority_q):
# Initialize min value and min_index as -1
minimum = float("Inf")
min_index = -1
# From the D array, use the for loop to check the minimum distance and
# return the index
for i in range(len(D)):
if D[i] < minimum and i in priority_q:
minimum = D[i]
min_index = i
return min_index
# Function used to print the shortest path in the Shortest_tree array using recursion
# and return the value of shortest distance
def printPath(self,Shortest_tree,u):
row = len(self.graph)
distance = 0
#Base Case : If u is source
# when Shortest_tree[-1] then path length = 0
if Shortest_tree[u] == -1 and u < row:
print ("\n Shortest path = ",self.city[u],end=" "),
return distance
        # Recursion: print the vertices that the path goes through
# and compute the shortest distance
distance = distance_storage[u][Shortest_tree[u]]
length = self.printPath(Shortest_tree, Shortest_tree[u])
distance = length + distance
print("->", end=" ")
# print vertex only if its less than original vertex length.
if u < row :
print (self.city[u],end=" "),
return distance
#=============================================================================
class Menu:
def __init__(self):
self.G = Graph()
self.G.DisplayGraph("Initial Graph")
print("MA: Marseille, France")
print("YE: Yerevan, Armenia")
print("OS: Orlo, Norway")
print("VI: Vienna Austria")
print("WE: Wellington, New Zealand")
def UserMenu(self):
proceed = True
while(proceed == True):
print("\t===================Menu===================")
print("\t 1. Cycle Detection")
print("\t 2. Remove edges")
print("\t 3. Display current graph")
print("\t 4. Reset")
print("\t 5. Exit\n")
choice = input("\t Please enter the number of your choice: ")
if(choice == '1'):
self.Function_1()
elif(choice == '2'):
self.Function_2()
elif(choice == '3'):
self.G.DisplayGraph("Current Graph")
elif(choice == '4'):
self.G.__init__()
self.G.DisplayGraph("Current Graph")
elif(choice == '5'):
proceed = False
else:
print("\n\t\t Invalid input")
return
def Function_1(self):
choice=input("Are you looking for\n1. Any Cycle\n2. Cycle with a node you are looking for\n")
if(choice == '1'):
while self.G.CheckCycle() == False :
self.G.AddEdge()
self.G.DisplayGraph("Current Graph")
print("The current graph has no detected cycle.")
print("No worries! We have added an edge for you.")
input("Press any key to continue.\n")
elif(choice == '2'):
print("\nFrom")
print("MA, YE, OS, VI, WE")
node = input ("Enter the node :")
if(node not in self.G.city.values()):
print(node + " is not in the city list")
else:
self.G.GetCycleWithAppointedNode(node)
else:
print("\n\t\t Invalid input")
def Function_2(self):
print("\nFrom")
print("MA, YE, OS, VI, WE")
sc = input ("Enter source city :")
dc = input("Enter destination city : ")
print("Source city = " + sc)
print("Destination city = " + dc)
# If the source city and the destination city are the same, return 0 length
if (sc == dc):
print("The source city and the destination city are the same city, the shortest distance is 0")
return 0
if(sc in self.G.city.values() and dc in self.G.city.values()):
# Remove the edge between the source city and the destination city
self.G.RemoveEdge(sc, dc)
self.G.DisplayGraph("Current Graph")
else:
if(sc not in self.G.city.values()):
print(sc + " is not in the city list")
if(dc not in self.G.city.values()):
print(dc + " is not in the city list")
#Run the program
UserMenu = Menu()
UserMenu.UserMenu()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d81bf82845c2f8e12980533f9d59d8e047901438
|
e07da133c4efa517e716af2bdf67a46f88a65b42
|
/hub20/apps/blockchain/management/commands/sync_blockchain.py
|
b28399a2a20dc1fa41b00b74038c1e23e9e449e6
|
[
"MIT"
] |
permissive
|
cryptobuks1/hub20
|
be1da5f77a884f70068fd41edaa45d5e65b7c35e
|
3a4d9cf16ed9d91495ac1a28c464ffb05e9f837b
|
refs/heads/master
| 2022-04-19T21:26:15.386567
| 2020-04-19T07:17:47
| 2020-04-19T07:17:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
import asyncio
import logging
from django.core.management.base import BaseCommand
from hub20.apps.blockchain.app_settings import START_BLOCK_NUMBER
from hub20.apps.blockchain.models import Block, make_web3
logger = logging.getLogger(__name__)
def split_block_lists(block_numbers, group_size=25):
for n in range(0, len(block_numbers), group_size):
yield block_numbers[n : n + group_size]
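# A quick sanity check of the chunking helper (illustrative only):
# >>> [chunk for chunk in split_block_lists(list(range(7)), group_size=3)]
# [[0, 1, 2], [3, 4, 5], [6]]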
async def make_blocks_in_range(w3, start, end, speed=25):
chain_id = int(w3.net.version)
chain_blocks = Block.objects.filter(chain=chain_id)
block_range = (start, end)
recorded_block_set = set(
chain_blocks.filter(number__range=block_range).values_list("number", flat=True)
)
range_set = set(range(*block_range))
missing_blocks = list(range_set.difference(recorded_block_set))[::-1]
counter = 0
logger.info(f"{len(missing_blocks)} missing blocks between {start} and {end}")
for block_list in split_block_lists(missing_blocks, group_size=speed):
for block_number in block_list:
counter += 1
if (counter % speed) == 0:
await asyncio.sleep(1)
Block.make_all(block_number, w3)
else:
await asyncio.sleep(1)
async def save_new_blocks(w3):
current_block_number = w3.eth.blockNumber
while True:
logger.info(f"Current block number: {current_block_number}")
block_number = w3.eth.blockNumber
if block_number > current_block_number:
Block.make_all(block_number, w3)
current_block_number = block_number
else:
await asyncio.sleep(5)
async def backfill(w3):
SCAN_SIZE = 5000
end = w3.eth.blockNumber
while end > START_BLOCK_NUMBER:
start = max(end - SCAN_SIZE, START_BLOCK_NUMBER)
await make_blocks_in_range(w3, start, end)
end = start
logger.info(f"Backfill complete. All blocks from {end} now recorded")
class Command(BaseCommand):
help = "Listens to new blocks and transactions on event loop and saves on DB"
def handle(self, *args, **options):
w3 = make_web3()
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(asyncio.gather(save_new_blocks(w3), backfill(w3)))
finally:
loop.close()
|
[
"raphael@lullis.net"
] |
raphael@lullis.net
|
b292bac4c82c505a69551f7b36c2d8b63c9e89b6
|
f233b4ac980197536c597da44e0f8c59423b3b79
|
/Chapter_4/sequence_data_type.py
|
668faf6a322b26937506e290d762d273858ede2a
|
[
"Apache-2.0"
] |
permissive
|
alenasf/AutomateTheBoringStuff
|
594142f5a205ebd0e9197e57e5230233a1f35aea
|
041e56221eb98d9893c24d22497034e6344c0490
|
refs/heads/master
| 2022-11-08T01:23:08.311049
| 2020-06-30T21:51:23
| 2020-06-30T21:51:23
| 265,424,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,399
|
py
|
#
name = 'Zophie'
name[0] # 'Z'
name[-2] # 'i'
name[0:4] # 'Zoph'
'Zo' in name # True
'z' in name # False
'p' not in name # False
for i in name:
print('***' + i + '***')
#***Z***
#***o***
#***p***
#***h***
#***i***
#***e***
# Mutable and Immutable Data Types
name = 'Zophie a cat'
newName = name[0:7] + 'the' + name[8:12]
name # 'Zophie a cat'
newName # 'Zophie the cat'
# the contents of eggs are replaced with a new list value
eggs = [1, 2, 3]
eggs = [4, 5, 6]
eggs # [4, 5, 6]
# modify the original list value in place
eggs = [1, 2, 3]
del eggs[2]
del eggs[1]
del eggs[0]
eggs.append(4)
eggs.append(5)
eggs.append(6)
eggs # [4, 5, 6]
# The Tuple Data Type
eggs = ('hello', 42, 0.5)
eggs[0] # 'hello'
eggs[1:3] # (42, 0.5)
len(eggs) # 3
type(('hello', )) # <class 'tuple'>
type(('hello')) # <class 'str'>
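# Tuples are immutable; item assignment raises an error:
# eggs[1] = 99 # TypeError: 'tuple' object does not support item assignment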
#Converting Types with list() and tuple() functions
tuple(['cat', 'dog', 5]) # ('cat', 'dog', 5)
list(('cat', 'dog', 5)) # ['cat', 'dog', 5]
list('hello') # ['h', 'e', 'l', 'l', 'o']
## References
# integers
spam = 42
cheese = spam
spam = 100
spam # 100
cheese # 42
#lists
spam = [0, 1, 2, 3, 4, 5]
cheese = spam # The reference is being copied, not the list
cheese[1] = 'Hello'# This changes the list value.
spam # [0, 'Hello', 2, 3, 4, 5]
cheese # The cheese variable refers to the same list.
# [0, 'Hello', 2, 3, 4, 5]
#Identity and the id() Function
bacon = 'Hello'
id(bacon) # 140694618978864
bacon += ' world!' # A new string is made from 'Hello' and ' world!'
id(bacon) # bacon now refers to a completely different string. # 140694619023280
eggs = ['cat', 'dog'] # This creates a new list.
id(eggs) # 140694618441680
eggs.append('moose') # append() modifies the list "in place".
id(eggs) # eggs still refers to the same list as before. # 140694618441680
eggs = ['bat', 'rat', 'cow'] # This creates a new list, which has a new identity.
id(eggs) # eggs now refers to a completely different list. # 140694618224352
# Passing References
def eggs(someParameter):
someParameter.append('Hello')
spam = [1, 2, 3]
eggs(spam)
print(spam)
# [1, 2, 3, 'Hello']
# The copy Module's copy() and deepcopy() Function
import copy
spam = ['A', 'B', 'C', 'D']
id(spam) # 140694617686816
cheese = copy.copy(spam)
id(cheese) # cheese is a different list with a different identity. # 140694618396144
cheese[1] = 42
spam # ['A', 'B', 'C', 'D']
cheese # ['A', 42, 'C', 'D']
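# copy.copy() copies only the outer list; for nested lists, copy.deepcopy()
# copies the inner lists as well (illustrative example for the deepcopy()
# half of this section):
spam = [['a', 'b'], ['c', 'd']]
cheese = copy.copy(spam)
cheese[0][0] = 42
spam # [[42, 'b'], ['c', 'd']] -- the inner list is shared
spam = [['a', 'b'], ['c', 'd']]
cheese = copy.deepcopy(spam)
cheese[0][0] = 42
spam # [['a', 'b'], ['c', 'd']] -- the inner lists were copied too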
|
[
"30708007+alenasf@users.noreply.github.com"
] |
30708007+alenasf@users.noreply.github.com
|
9de0c9caba42de454a384bb60732507d489ce0e4
|
a6227b36e30d5cafaa3e66067744913771209106
|
/counter/views.py
|
6d2233cd1501dec1eea2f24ef48d1ba1e4b1fb2b
|
[] |
no_license
|
prasannatuladhar/newwordcounter
|
0c5f716b359162ea0bafcbab8fc0c109171f1427
|
b4d7277cba3fd31b6b51e641ca4ca6e4785d0b1d
|
refs/heads/master
| 2020-03-21T21:15:23.963462
| 2018-06-30T19:12:10
| 2018-06-30T19:12:10
| 139,054,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
from django.shortcuts import render
def home(request):
return render(request,'home.html')
def count(request):
textarea = request.GET['textarea']
wordlist = textarea.split()
words = {}
for word in wordlist:
if word in words:
words[word] += 1
else:
words[word] = 1
return render(request,'counter.html', {'counter':len(wordlist),'textarea':textarea,'words':words.items()})
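# For comparison, the same tally can be produced with the standard library's
# collections.Counter; this helper is an illustrative sketch and is not used
# by the view above.
from collections import Counter

def count_words(text):
    # Counter maps each word to its number of occurrences
    return Counter(text.split())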
|
[
"prasanna.pmt@gmail.com"
] |
prasanna.pmt@gmail.com
|
edb2584bd2ffb584e0b19a5360d8bf77347a593b
|
93db6e557d15a757ca7b53ce08acd29e1ed75ef1
|
/blog/models.py
|
699c7bae7b886e5e0f6d9423d507ed2ee0b9bb3d
|
[] |
no_license
|
elkhot/mysite
|
4058fc41e5b7d6ecb49916d8bbbfe0e720d78bd2
|
c82371e2ab2419e4ef9ba1dd3860dedd4a7777cf
|
refs/heads/master
| 2022-01-29T20:23:14.044232
| 2019-07-22T19:41:48
| 2019-07-22T19:41:48
| 198,111,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Blog(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField()
body = models.TextField()
date = models.DateTimeField(auto_now_add=True)
thumb = models.ImageField(default='default.png', blank=True)
author = models.ForeignKey(User, on_delete=models.CASCADE, default=None)
def __str__(self):
return self.title
def snippet(self):
return self.body[:50] + '...'
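# Illustrative usage (e.g. in a Django shell; the field values are made up):
# >>> post = Blog(title='Hello', slug='hello', body='word ' * 20)
# >>> post.snippet() # first 50 characters of the body plus '...'
# 'word word word word word word word word word word ...'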
|
[
"elkhot@gmail.com"
] |
elkhot@gmail.com
|
7260fc8fc752f352287646282d624abfa0a0dd85
|
2d4ad8674d6590d02f80214d9c67cd61a9ae7920
|
/src/includes/data_array.py
|
3c5334d30b7cf9527257881072e26bbfe461747a
|
[] |
no_license
|
lotka/autofaces
|
abd02191d1bb094a957018da18e7c93a027e46ff
|
6f456a5452abed81e2ea0669e43c9bc76d8eb58a
|
refs/heads/master
| 2021-01-15T23:58:17.377999
| 2017-04-11T16:40:55
| 2017-04-11T16:40:55
| 58,305,916
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,513
|
py
|
import collections.abc
import os
import numpy as np
import h5py
class FileHDF5(object):
def __init__(self, path_to_file, name_item):
self.__path_to_file = path_to_file
self.__name_item = name_item
self.print_enabled = True
self.__shape = None
def __getitem__(self, item):
self._print('read file ' + self.__path_to_file)
f = h5py.File(self.__path_to_file, 'r')
hdf5_item = f.get(self.__name_item)
# update shape; should have low cost
self.__shape = hdf5_item.shape
hdf5_item = hdf5_item[item]
data = np.array(hdf5_item)
f.close()
return data
def _print(self, str_):
if self.print_enabled:
classname = type(self).__name__
print("[" + classname + "] " + str_)
def __len__(self):
self.__load_info()
return self.__shape[0]
def __repr__(self):
classname = type(self).__name__
string = "{}(path_to_file={}, name_item={})".format(classname, repr(self.__path_to_file),
repr(self.__name_item))
return string
def __load_info(self):
if self.__shape is None:
f = h5py.File(self.__path_to_file, 'r')
hdf5_item = f.get(self.__name_item)
self.__shape = hdf5_item.shape
f.close()
@property
def shape(self):
self.__load_info()
return self.__shape
@property
def ndims(self):
self.__load_info()
return len(self.__shape)
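# Illustrative use of FileHDF5 (the file and dataset names are made up):
# >>> with h5py.File('example.h5', 'w') as f:
# ...     _ = f.create_dataset('data', data=np.arange(12).reshape(3, 4))
# >>> arr = FileHDF5('example.h5', 'data')
# >>> arr.shape
# (3, 4)
# >>> len(arr)
# 3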
class TransposeFront(object):
def __init__(self, obj, ndims, front, reverse=False):
assert front < ndims
self.obj = obj
self.ndims = ndims
self.front = front
self.reverse = reverse
def __order(self):
order = list(range(self.ndims))
front_elem = order.pop(self.front)
if self.reverse:
order.reverse()
order.insert(0, front_elem)
return order
def __order_without(self, keep_dim):
order = []
next = 0
for k in keep_dim:
if k:
order.append(next)
next += 1
else:
order.append(None)
front_elem = order.pop(self.front)
if self.reverse:
order.reverse()
order.insert(0, front_elem)
order = [o for o in order if o is not None]
return order
def __order_inv(self):
order = self.__order()
order_inv = [0] * len(order)
for i in range(len(order)):
order_inv[order[i]] = i
return order_inv
@property
def shape(self):
shape_inner = self.obj.shape
# lazy checking of consistency
assert self.ndims == len(shape_inner)
shape = tuple(shape_inner[i] for i in self.__order())
return shape
def __getitem__(self, item):
if not isinstance(item, collections.abc.Iterable):
item = (item,)
assert len(item) <= self.ndims
item_all = [np.s_[:],] * self.ndims
keep_dim = np.ones(self.ndims, dtype=bool)
for n, it in enumerate(item):
item_all[n] = it
if isinstance(it, int):
keep_dim[n] = False
item_inner = tuple(item_all[i] for i in self.__order_inv())
data = self.obj[item_inner]
data = np.transpose(data, axes=self.__order_without(keep_dim))
return data
def __repr__(self):
classname = type(self).__name__
string = "{}(obj={}, ndims={}, front={}, reverse={})".format(classname, repr(self.obj), repr(self.ndims),
repr(self.front), repr(self.reverse))
return string
def __len__(self):
return self.shape[0]
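# Sketch of what TransposeFront does (the array and arguments are made up):
# >>> a = np.arange(24).reshape(2, 3, 4)
# >>> t = TransposeFront(a, ndims=3, front=2)
# >>> t.shape # axis 2 is presented as the leading axis
# (4, 2, 3)
# >>> len(t)
# 4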
class FileArray(object):
def __init__(self):
# FileHDF5 arguments
self.parts_path_to_file = None
self.name_item = None
# TransposeFront arguments
self.ndims = None
self.front = None
self.reverse = None
# other arguments
self.K = None # should have same shape as single data sample
def get_path_to_file(self, *args):
path_to_file_raw = os.path.join(*(self.parts_path_to_file))
path_to_file = path_to_file_raw.format(*args)
return path_to_file
def get_array_id(self, *args):
path_to_file = self.get_path_to_file(*args)
file_hdf5 = FileHDF5(path_to_file, self.name_item)
file_hdf5_transpose= TransposeFront(file_hdf5, self.ndims, self.front, self.reverse)
return file_hdf5_transpose
def __getitem__(self, item):
return self.get_array_id(item)
class IndicesCollection(object):
def __init__(self, coll=None, id_array=None):
self.coll = coll
if id_array is not None:
self.from_id_array(id_array)
def __repr__(self):
return repr(self.coll)
def __str__(self):
return str(self.coll)
def getitem(self, obj):
result = []
ids = []
for c in self.coll:
obj_inner = obj
if not isinstance(c, collections.abc.Iterable):
c = (c,)
ids_act = []
for ind in c:
if isinstance(ind, slice):
s = ind.indices(len(obj_inner))
ids_act.append((np.arange(s[0], s[1], s[2]),))
else:
ids_act.append(ind)
obj_inner = obj_inner[ind]
result.append(obj_inner)
ids.append(ids_act)
result = np.concatenate(result)
id_array = IndicesCollection(ids).to_id_array()
return result, id_array
def to_id_array(self):
id_all = np.zeros((0,0), dtype=int)
for c in self.coll:
id_prefix = np.zeros((1, 0), dtype=int)
if not isinstance(c, collections.abc.Iterable):
c = (c,)
for ind in c:
ind = np.array(ind, dtype=int)
ind = ind.reshape(-1,1)
if ind.shape[0] > 1:
id_prefix = np.broadcast_to(id_prefix, (ind.shape[0], id_prefix.shape[1]))
if id_prefix.shape[0] > 1:
ind = np.broadcast_to(ind, (id_prefix.shape[0], ind.shape[1]))
id_prefix = np.concatenate([id_prefix, ind], axis=1)
if id_all.shape[0] == 0:
id_all = id_prefix
else:
id_all = np.concatenate([id_all, id_prefix], axis=0)
return id_all
def from_id_array(self, id_all):
self.coll = []
self.__from_id_array_rec(id_all)
def __from_id_array_rec(self, id_all):
if id_all.shape[0] == 0:
# finished
return
if id_all.shape[1] == 1:
# all within single indices
self.coll.append([(id_all,)])
return
# beginning of first row is the next id
id_next = id_all[0,:-1]
ind = np.all(id_all[:,:-1] == id_next, axis=1)
ind_tuple = [x for x in id_next]
ind_tuple.append((id_all[ind,-1],))
self.coll.append(ind_tuple)
id_all = id_all[~ind, :]
self.__from_id_array_rec(id_all)
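# Tiny illustration of to_id_array (the collection is made up): the entry
# [0, ([1, 2],)] means "index 0 along the first axis, indices 1 and 2 along
# the second", and it flattens to one row per index combination.
# >>> IndicesCollection([[0, ([1, 2],)]]).to_id_array()
# array([[0, 1],
#        [0, 2]])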
|
[
"lukam321@gmail.com"
] |
lukam321@gmail.com
|
a9b833ac69b6d381c12a94bbea7ed0657a2dec65
|
b4b7abf5e410969e6b6a6a7539a2b14242d03b54
|
/graph_pb2.py
|
87b655a9a2724ef810a641d66e13ce60bf52514f
|
[] |
no_license
|
andreimargeloiu/Syntactic-Neural-Code-Completion
|
0f5263923ed72bf88bbf53bdc6830c5d883852f7
|
378a43a228c5a311e4116b7ae913453b81ce2a33
|
refs/heads/master
| 2022-04-07T09:31:25.647867
| 2020-03-05T18:22:20
| 2020-03-05T18:22:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 15,192
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: graph.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='graph.proto',
package='protobuf',
syntax='proto2',
serialized_pb=_b('\n\x0bgraph.proto\x12\x08protobuf\"\xa6\x03\n\x0b\x46\x65\x61tureNode\x12\n\n\x02id\x18\x01 \x01(\x03\x12,\n\x04type\x18\x02 \x01(\x0e\x32\x1e.protobuf.FeatureNode.NodeType\x12\x10\n\x08\x63ontents\x18\x03 \x01(\t\x12\x15\n\rstartPosition\x18\x04 \x01(\x05\x12\x13\n\x0b\x65ndPosition\x18\x05 \x01(\x05\x12\x17\n\x0fstartLineNumber\x18\x06 \x01(\x05\x12\x15\n\rendLineNumber\x18\x07 \x01(\x05\"\xee\x01\n\x08NodeType\x12\t\n\x05TOKEN\x10\x01\x12\x0f\n\x0b\x41ST_ELEMENT\x10\x02\x12\x10\n\x0c\x43OMMENT_LINE\x10\x03\x12\x11\n\rCOMMENT_BLOCK\x10\x04\x12\x13\n\x0f\x43OMMENT_JAVADOC\x10\x05\x12\x14\n\x10IDENTIFIER_TOKEN\x10\x07\x12\x0c\n\x08\x46\x41KE_AST\x10\x08\x12\n\n\x06SYMBOL\x10\t\x12\x0e\n\nSYMBOL_TYP\x10\n\x12\x0e\n\nSYMBOL_VAR\x10\x0b\x12\x0e\n\nSYMBOL_MTH\x10\x0c\x12\x08\n\x04TYPE\x10\r\x12\x14\n\x10METHOD_SIGNATURE\x10\x0e\x12\x0c\n\x08\x41ST_LEAF\x10\x0f\"\xa0\x03\n\x0b\x46\x65\x61tureEdge\x12\x10\n\x08sourceId\x18\x01 \x01(\x03\x12\x15\n\rdestinationId\x18\x02 \x01(\x03\x12,\n\x04type\x18\x03 \x01(\x0e\x32\x1e.protobuf.FeatureEdge.EdgeType\"\xb9\x02\n\x08\x45\x64geType\x12\x14\n\x10\x41SSOCIATED_TOKEN\x10\x01\x12\x0e\n\nNEXT_TOKEN\x10\x02\x12\r\n\tAST_CHILD\x10\x03\x12\x08\n\x04NONE\x10\x04\x12\x0e\n\nLAST_WRITE\x10\x05\x12\x0c\n\x08LAST_USE\x10\x06\x12\x11\n\rCOMPUTED_FROM\x10\x07\x12\x0e\n\nRETURNS_TO\x10\x08\x12\x13\n\x0f\x46ORMAL_ARG_NAME\x10\t\x12\x0e\n\nGUARDED_BY\x10\n\x12\x17\n\x13GUARDED_BY_NEGATION\x10\x0b\x12\x14\n\x10LAST_LEXICAL_USE\x10\x0c\x12\x0b\n\x07\x43OMMENT\x10\r\x12\x15\n\x11\x41SSOCIATED_SYMBOL\x10\x0e\x12\x0c\n\x08HAS_TYPE\x10\x0f\x12\x11\n\rASSIGNABLE_TO\x10\x10\x12\x14\n\x10METHOD_SIGNATURE\x10\x11\"\xba\x01\n\x05Graph\x12#\n\x04node\x18\x01 \x03(\x0b\x32\x15.protobuf.FeatureNode\x12#\n\x04\x65\x64ge\x18\x02 \x03(\x0b\x32\x15.protobuf.FeatureEdge\x12\x12\n\nsourceFile\x18\x03 \x01(\t\x12*\n\x0b\x66irst_token\x18\x04 \x01(\x0b\x32\x15.protobuf.FeatureNode\x12\'\n\x08\x61st_root\x18\x05 \x01(\x0b\x32\x15.protobuf.FeatureNodeB3\n$uk.ac.cam.acr31.features.javac.protoB\x0bGraphProtos')
)
_FEATURENODE_NODETYPE = _descriptor.EnumDescriptor(
name='NodeType',
full_name='protobuf.FeatureNode.NodeType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TOKEN', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AST_ELEMENT', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMENT_LINE', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMENT_BLOCK', index=3, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMENT_JAVADOC', index=4, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IDENTIFIER_TOKEN', index=5, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAKE_AST', index=6, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYMBOL', index=7, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYMBOL_TYP', index=8, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYMBOL_VAR', index=9, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYMBOL_MTH', index=10, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE', index=11, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='METHOD_SIGNATURE', index=12, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AST_LEAF', index=13, number=15,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=210,
serialized_end=448,
)
_sym_db.RegisterEnumDescriptor(_FEATURENODE_NODETYPE)
_FEATUREEDGE_EDGETYPE = _descriptor.EnumDescriptor(
name='EdgeType',
full_name='protobuf.FeatureEdge.EdgeType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ASSOCIATED_TOKEN', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEXT_TOKEN', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AST_CHILD', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NONE', index=3, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LAST_WRITE', index=4, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LAST_USE', index=5, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMPUTED_FROM', index=6, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RETURNS_TO', index=7, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORMAL_ARG_NAME', index=8, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GUARDED_BY', index=9, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GUARDED_BY_NEGATION', index=10, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LAST_LEXICAL_USE', index=11, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMENT', index=12, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASSOCIATED_SYMBOL', index=13, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HAS_TYPE', index=14, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASSIGNABLE_TO', index=15, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='METHOD_SIGNATURE', index=16, number=17,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=554,
serialized_end=867,
)
_sym_db.RegisterEnumDescriptor(_FEATUREEDGE_EDGETYPE)
_FEATURENODE = _descriptor.Descriptor(
name='FeatureNode',
full_name='protobuf.FeatureNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='protobuf.FeatureNode.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='protobuf.FeatureNode.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contents', full_name='protobuf.FeatureNode.contents', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startPosition', full_name='protobuf.FeatureNode.startPosition', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endPosition', full_name='protobuf.FeatureNode.endPosition', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startLineNumber', full_name='protobuf.FeatureNode.startLineNumber', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endLineNumber', full_name='protobuf.FeatureNode.endLineNumber', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_FEATURENODE_NODETYPE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=448,
)
_FEATUREEDGE = _descriptor.Descriptor(
name='FeatureEdge',
full_name='protobuf.FeatureEdge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sourceId', full_name='protobuf.FeatureEdge.sourceId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='destinationId', full_name='protobuf.FeatureEdge.destinationId', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='protobuf.FeatureEdge.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_FEATUREEDGE_EDGETYPE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=451,
serialized_end=867,
)
_GRAPH = _descriptor.Descriptor(
name='Graph',
full_name='protobuf.Graph',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node', full_name='protobuf.Graph.node', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='edge', full_name='protobuf.Graph.edge', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sourceFile', full_name='protobuf.Graph.sourceFile', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_token', full_name='protobuf.Graph.first_token', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ast_root', full_name='protobuf.Graph.ast_root', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=870,
serialized_end=1056,
)
_FEATURENODE.fields_by_name['type'].enum_type = _FEATURENODE_NODETYPE
_FEATURENODE_NODETYPE.containing_type = _FEATURENODE
_FEATUREEDGE.fields_by_name['type'].enum_type = _FEATUREEDGE_EDGETYPE
_FEATUREEDGE_EDGETYPE.containing_type = _FEATUREEDGE
_GRAPH.fields_by_name['node'].message_type = _FEATURENODE
_GRAPH.fields_by_name['edge'].message_type = _FEATUREEDGE
_GRAPH.fields_by_name['first_token'].message_type = _FEATURENODE
_GRAPH.fields_by_name['ast_root'].message_type = _FEATURENODE
DESCRIPTOR.message_types_by_name['FeatureNode'] = _FEATURENODE
DESCRIPTOR.message_types_by_name['FeatureEdge'] = _FEATUREEDGE
DESCRIPTOR.message_types_by_name['Graph'] = _GRAPH
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FeatureNode = _reflection.GeneratedProtocolMessageType('FeatureNode', (_message.Message,), dict(
DESCRIPTOR = _FEATURENODE,
__module__ = 'graph_pb2'
# @@protoc_insertion_point(class_scope:protobuf.FeatureNode)
))
_sym_db.RegisterMessage(FeatureNode)
FeatureEdge = _reflection.GeneratedProtocolMessageType('FeatureEdge', (_message.Message,), dict(
DESCRIPTOR = _FEATUREEDGE,
__module__ = 'graph_pb2'
# @@protoc_insertion_point(class_scope:protobuf.FeatureEdge)
))
_sym_db.RegisterMessage(FeatureEdge)
Graph = _reflection.GeneratedProtocolMessageType('Graph', (_message.Message,), dict(
DESCRIPTOR = _GRAPH,
__module__ = 'graph_pb2'
# @@protoc_insertion_point(class_scope:protobuf.Graph)
))
_sym_db.RegisterMessage(Graph)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n$uk.ac.cam.acr31.features.javac.protoB\013GraphProtos'))
# @@protoc_insertion_point(module_scope)
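# Appended for illustration only (not part of the generated output; 'data'
# below stands for serialized bytes obtained elsewhere). The generated Graph
# class is used with the standard protobuf message API:
# >>> g = Graph()
# >>> _ = g.ParseFromString(data)
# >>> first_node = g.node[0].contents if g.node else None # repeated field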
|
[
"andrei.margeloiu@gmail.com"
] |
andrei.margeloiu@gmail.com
|
69d42db8db5ccdb6da07528e5c8e31e48bc6746a
|
d66496d032af01056b5974a7a24484302f1ce332
|
/logiconio/urls.py
|
cb6493efd15f8b00ca12df643b8eb92ad3d16a27
|
[] |
no_license
|
lee-hodg/LogiconTheme
|
5a00a179a3b6345bbbd8d2c53ab4c7b8542edc3a
|
556fdfd75d517b8bf0c1680cfc87f68f0e0ffcfe
|
refs/heads/master
| 2023-01-11T18:12:27.020053
| 2019-02-17T21:14:19
| 2019-02-17T21:14:19
| 307,756,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,367
|
py
|
from __future__ import unicode_literals
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.views.i18n import set_language
from mezzanine.conf import settings
from theme.views import AJAXPortfolioPageView
import mezzanine.pages.views
admin.autodiscover()
# Add the urlpatterns for any custom Django applications here.
# You can also change the ``home`` view to add your own functionality
# to the project's homepage.
urlpatterns = i18n_patterns(
# Change the admin prefix here to use an alternate URL for the
# admin interface, which would be marginally more secure.
url("^admin/", include(admin.site.urls)),
)
if settings.USE_MODELTRANSLATION:
urlpatterns += [
url('^i18n/$', set_language, name='set_language'),
]
urlpatterns += [
# We don't want to presume how your homepage works, so here are a
# few patterns you can use to set it up.
# HOMEPAGE AS STATIC TEMPLATE
# ---------------------------
# This pattern simply loads the index.html template. It isn't
# commented out like the others, so it's the default. You only need
# one homepage pattern, so if you use a different one, comment this
# one out.
# url("^$", direct_to_template, {"template": "index.html"}, name="home"),
# HOMEPAGE AS AN EDITABLE PAGE IN THE PAGE TREE
# ---------------------------------------------
# This pattern gives us a normal ``Page`` object, so that your
# homepage can be managed via the page tree in the admin. If you
# use this pattern, you'll need to create a page in the page tree,
# and specify its URL (in the Meta Data section) as "/", which
# is the value used below in the ``{"slug": "/"}`` part.
# Also note that the normal rule of adding a custom
# template per page with the template name using the page's slug
# doesn't apply here, since we can't have a template called
# "/.html" - so for this case, the template "pages/index.html"
# should be used if you want to customize the homepage's template.
# NOTE: Don't forget to import the view function too!
url("^$", mezzanine.pages.views.page, {"slug": "/"}, name="home"),
# HOMEPAGE FOR A BLOG-ONLY SITE
# -----------------------------
# This pattern points the homepage to the blog post listing page,
# and is useful for sites that are primarily blogs. If you use this
# pattern, you'll also need to set BLOG_SLUG = "" in your
# ``settings.py`` module, and delete the blog page object from the
# page tree in the admin if it was installed.
# NOTE: Don't forget to import the view function too!
# url("^$", mezzanine.blog.views.blog_post_list, name="home"),
# MEZZANINE'S URLS
# ----------------
# ADD YOUR OWN URLPATTERNS *ABOVE* THE LINE BELOW.
# ``mezzanine.urls`` INCLUDES A *CATCH ALL* PATTERN
# FOR PAGES, SO URLPATTERNS ADDED BELOW ``mezzanine.urls``
# WILL NEVER BE MATCHED!
url(r'^ajaxportfolioitems/$', AJAXPortfolioPageView.as_view(), name="portpage"),
# If you'd like more granular control over the patterns in
# ``mezzanine.urls``, go right ahead and take the parts you want
# from it, and use them directly below instead of using
# ``mezzanine.urls``.
url("^", include("mezzanine.urls")),
# MOUNTING MEZZANINE UNDER A PREFIX
# ---------------------------------
# You can also mount all of Mezzanine's urlpatterns under a
# URL prefix if desired. When doing this, you need to define the
# ``SITE_PREFIX`` setting, which will contain the prefix. Eg:
# SITE_PREFIX = "my/site/prefix"
# For convenience, and to avoid repeating the prefix, use the
# commented out pattern below (commenting out the one above of course)
# which will make use of the ``SITE_PREFIX`` setting. Make sure to
# add the import ``from django.conf import settings`` to the top
# of this file as well.
# Note that for any of the various homepage patterns above, you'll
# need to use the ``SITE_PREFIX`` setting as well.
# ("^%s/" % settings.SITE_PREFIX, include("mezzanine.urls"))
]
# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
|
[
"leehodg@gmail.com"
] |
leehodg@gmail.com
|
8a6ba53de0d67f640a8a9f14889c24584f961e1e
|
5cf24f0867fa19225b6eb87bb9d16defceace33b
|
/application/pkbadmin/views/dashboard_view.py
|
b505ef0abbf68be88b3e3f0d435d6ea493799253
|
[] |
no_license
|
Abhisheksoni1/pkbpy
|
9dec080fe78f40e3a884830ee6fff5899c7fe4c6
|
7845ac1cf57291d875e6e5bf04d8ee7fe36158ed
|
refs/heads/master
| 2023-06-30T05:31:01.143944
| 2021-08-03T19:33:01
| 2021-08-03T19:33:01
| 392,432,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,683
|
py
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.conf import settings
from pkbadmin.views.decorators import GroupRequiredMixin
from datetime import timedelta
from apps.orders.models import Order
from django.views import View
from apps.stores.models import Kitchen, StoreManager, StoreOwner, Store
from datetime import date
from django.db.models import Count
from django.http import HttpResponseRedirect, JsonResponse
from django.contrib import messages
from django.urls import reverse
class DashboardIndex(GroupRequiredMixin, View):
group_required = ['Super admin', 'Manager', 'Owner']
def get(self, request):
qs = get_user_model().objects.filter(is_staff=0, groups__name='User', is_active=1)
if request.user.is_superuser:
store = Store.objects.all()
today_date = date.today()
store_kitchen = [s for s in store if s.kitchens.all().count() != 0]
kitchen = Kitchen.objects.all()
order = Order.objects.all()
month_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
pending_order_monthly = []
confirmed_order_monthly = []
delivered_order_monthly = []
declined_order_monthly = []
for i in month_list:
pending_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING, created_on__year=today_date.year,
created_on__month=i,
)))
confirmed_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED,
created_on__year=today_date.year, created_on__month=i,
)))
delivered_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED,
created_on__year=today_date.year, created_on__month=i,
)))
declined_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED, created_on__year=today_date.year,
created_on__month=i,
)))
context = {
'total_users': len(qs),
'total_orders': len(order),
'store': store_kitchen,
'kitchen': kitchen,
'pending_order_month': pending_order_monthly,
'confirmed_order_month': confirmed_order_monthly,
'delivered_order_month': delivered_order_monthly,
'declined_order_month': declined_order_monthly,
}
return render(request, "dashboard/index.html", context)
elif "Manager" in request.user.group_name:
id = request.user.kitchenmanager.kitchen.id
kitchen = Kitchen.objects.filter(id=id)
today_date = date.today()
kitchen_id = [k.id for k in kitchen]
order = Order.objects.filter(kitchen_id__in=kitchen_id)
day_order = Order.objects.filter(created_on__date=today_date, kitchen_id__in=kitchen_id)
week_order = Order.objects.filter(created_on__gte=(timezone.now().date() - timedelta(days=7)),
kitchen_id__in=kitchen_id)
month_order = Order.objects.filter(created_on__gte=(timezone.now().date() - timedelta(days=30)),
kitchen_id__in=kitchen_id)
pending_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING,
created_on__gte=(timezone.now().date() - timedelta(days=7)),
kitchen_id__in=kitchen_id)
confirmed_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED,
created_on__gte=(timezone.now().date() - timedelta(days=7)),
kitchen_id__in=kitchen_id
)
delivered_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED,
created_on__gte=(timezone.now().date() - timedelta(days=7)),
kitchen_id__in=kitchen_id
)
declined_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED,
created_on__gte=(timezone.now().date() - timedelta(days=7)),
kitchen_id__in=kitchen_id
)
pending_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING,
created_on__date=today_date, kitchen_id__in=kitchen_id
)
confirmed_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED,
created_on__date=today_date, kitchen_id__in=kitchen_id
)
delivered_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED,
created_on__date=today_date, kitchen_id__in=kitchen_id
)
declined_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED,
created_on__date=today_date, kitchen_id__in=kitchen_id)
month_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
pending_order_monthly = []
confirmed_order_monthly = []
delivered_order_monthly = []
declined_order_monthly = []
for i in month_list:
pending_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING, kitchen_id__in=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
confirmed_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED, kitchen_id__in=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
delivered_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED, kitchen_id__in=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
declined_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED, kitchen_id__in=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
context = {
'kitchen': kitchen,
'kitchen_list': [k.id for k in kitchen],
'total_users': len(qs),
'total_orders': len(order),
'week_order': len(week_order),
'month_order': len(month_order),
'today_order': len(day_order),
'pending_order_month': pending_order_monthly,
'confirmed_order_month': confirmed_order_monthly,
'delivered_order_month': delivered_order_monthly,
'declined_order_month': declined_order_monthly,
'pending_order_week': len(pending_order_week),
'confirmed_order_week': len(confirmed_order_week),
'delivered_order_week': len(delivered_order_week),
'declined_order_week': len(declined_order_week),
'pending_order_daily': len(pending_order_daily),
'confirmed_order_daily': len(confirmed_order_daily),
'delivered_order_daily': len(delivered_order_daily),
'declined_order_daily': len(declined_order_daily),
}
return render(request, "dashboard/manager.html", context)
elif "Owner" in request.user.group_name:
try:
store_owner = StoreOwner.objects.get(owner=request.user)
store = store_owner.store
today_date = date.today()
kitchen = Kitchen.objects.filter(store_id=store.id)
order = Order.objects.all()
month_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
pending_order_monthly = []
confirmed_order_monthly = []
delivered_order_monthly = []
declined_order_monthly = []
for i in month_list:
pending_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING, created_on__year=today_date.year,
created_on__month=i, kitchen__store_id=store.id
)))
confirmed_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED,
created_on__year=today_date.year, created_on__month=i,
kitchen__store_id=store.id
)))
delivered_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED,
created_on__year=today_date.year, created_on__month=i,
kitchen__store_id=store.id
)))
declined_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED, created_on__year=today_date.year,
created_on__month=i, kitchen__store_id=store.id
)))
context = {
'total_users': len(qs),
'total_orders': len(order),
'store': store,
'kitchen': kitchen,
'pending_order_month': pending_order_monthly,
'confirmed_order_month': confirmed_order_monthly,
'delivered_order_month': delivered_order_monthly,
'declined_order_month': declined_order_monthly,
'user': request.user,
'bool':True
}
return render(request, "dashboard/owner.html", context)
except Exception as e:
""" bool to make sure we haven't addeded store yet"""
context = {
'total_users': len(qs),
'total_orders': 0,
'store': [],
'kitchen': [],
'pending_order_month': 0,
'confirmed_order_month': 0,
'delivered_order_month': 0,
'declined_order_month': 0,
'user': request.user,
'bool':False
}
return render(request, "dashboard/owner.html", context)
class GetFilterOrder(GroupRequiredMixin, View):
group_required = ['Super admin', 'Manager', 'Owner']
def post(self, request):
order_type = request.POST.get('order_type')
kitchen_id = request.POST.get('kitchen_id')
response = {'status': False, 'msg': '', 'data': {}}
today_date = date.today()
# print(request.user)
if order_type == "weekly":
pending_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING,
created_on__gte=(timezone.now().date() - timedelta(days=7))
)
if kitchen_id:
pending_order_week = pending_order_week.filter(kitchen_id=kitchen_id)
confirmed_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED,
created_on__gte=(timezone.now().date() - timedelta(days=7)),
)
if kitchen_id:
confirmed_order_week = confirmed_order_week.filter(kitchen_id=kitchen_id)
delivered_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED,
created_on__gte=(timezone.now().date() - timedelta(days=7)),
)
if kitchen_id:
delivered_order_week = delivered_order_week.filter(kitchen_id=kitchen_id)
declined_order_week = Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED,
created_on__gte=(timezone.now().date() - timedelta(days=7)),
)
if kitchen_id:
declined_order_week = declined_order_week.filter(kitchen_id=kitchen_id)
response['status'] = True
response['data'] = {
'pending_order': len(pending_order_week),
'confirmed_order': len(confirmed_order_week),
'delivered_order': len(delivered_order_week),
'declined_order': len(declined_order_week),
}
elif order_type == "daily":
pending_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING,
created_on__date=today_date
)
if kitchen_id:
pending_order_daily = pending_order_daily.filter(kitchen_id=kitchen_id)
confirmed_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED,
created_on__date=today_date
)
if kitchen_id:
confirmed_order_daily = confirmed_order_daily.filter(kitchen_id=kitchen_id)
delivered_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED,
created_on__date=today_date
)
if kitchen_id:
delivered_order_daily = delivered_order_daily.filter(kitchen_id=kitchen_id)
declined_order_daily = Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED,
created_on__date=today_date,
)
if kitchen_id:
declined_order_daily = declined_order_daily.filter(kitchen_id=kitchen_id)
response['status'] = True
response['data'] = {
'pending_order': len(pending_order_daily),
'confirmed_order': len(confirmed_order_daily),
'delivered_order': len(delivered_order_daily),
'declined_order': len(declined_order_daily),
}
else:
response['status'] = False
response['msg'] = 'Some error occurred ! Please reload the page.'
return JsonResponse(response)
class GetKitchenOrderFilter(GroupRequiredMixin, View):
group_required = ['Super admin', 'Manager']
def post(self, request):
kitchen_id = request.POST.get('kitchen_id')
response = {'status': False, 'msg': '', 'data': {}}
today_date = date.today()
month_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
pending_order_monthly = []
confirmed_order_monthly = []
delivered_order_monthly = []
declined_order_monthly = []
for i in month_list:
pending_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_PENDING, kitchen_id=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
confirmed_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_CONFIRMED, kitchen_id=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
delivered_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DELIVERED, kitchen_id=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
declined_order_monthly.append(
len(Order.objects.filter(order_status=Order.ORDER_STATUS_DECLINED, kitchen_id=kitchen_id,
created_on__year=today_date.year, created_on__month=i,
)))
response['status'] = True
response['data'] = {
'pending_order': pending_order_monthly,
'confirmed_order': confirmed_order_monthly,
'delivered_order': delivered_order_monthly,
'declined_order': declined_order_monthly,
}
return JsonResponse(response)
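# Illustrative refactor sketch (not wired into the views above): the twelve
# per-month count queries in each loop could be collapsed into one aggregated
# query using TruncMonth. Treat this as an assumed-equivalent query, not a
# drop-in replacement.
from django.db.models.functions import TruncMonth

def monthly_order_counts(order_status, year):
    # Group this year's orders of the given status by calendar month.
    return (Order.objects
            .filter(order_status=order_status, created_on__year=year)
            .annotate(month=TruncMonth('created_on'))
            .values('month')
            .annotate(total=Count('id'))
            .order_by('month'))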
|
[
"abhi@neargroup.me"
] |
abhi@neargroup.me
|