blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
00756c170529caf80449aa2e41783c60b2a14dad | Python | yennanliu/CS_basics | /leetcode_python/Binary_Search/find-smallest-letter-greater-than-target.py | UTF-8 | 1,226 | 4.03125 | 4 | [] | no_license | # V0
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79137225
# IDEA : LINEAR SEARCH
class Solution(object):
    def nextGreatestLetter(self, letters, target):
        """Return the smallest letter in `letters` strictly greater than
        `target`, wrapping around to the first letter when none exists.

        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        # Single linear pass: the first match in the (sorted) list is the
        # smallest strictly-greater letter; the generator default handles
        # the circular wrap-around case.
        return next((ch for ch in letters if ord(ch) > ord(target)), letters[0])
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/79137225
# IDEA : BINARY SEARCH
class Solution(object):
    def nextGreatestLetter(self, letters, target):
        """Binary-search variant: smallest letter strictly greater than
        `target`, wrapping to letters[0] when target >= every letter.

        Relies on the module-level `import bisect` elsewhere in this file.

        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        pos = bisect.bisect_right(letters, target)
        if pos == len(letters):
            # Insertion point past the end: wrap to the smallest letter.
            pos = 0
        return letters[pos]
# V2
# Time: O(logn)
# Space: O(1)
import bisect
class Solution(object):
    def nextGreatestLetter(self, letters, target):
        """Smallest letter strictly greater than `target` (O(log n)).

        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        # bisect_right lands after any letters equal to target; taking the
        # index modulo len(letters) folds the past-the-end case back to
        # index 0, i.e. the circular wrap-around.
        return letters[bisect.bisect_right(letters, target) % len(letters)]
| true |
d1987ff6983d4926594a40f89cfaa46e1b255f50 | Python | edge555/Online-Judge-Solves | /AtCoder/Beginner Contest/137/B - One Clue.py | UTF-8 | 99 | 3.359375 | 3 | [] | no_license | n,m=map(int,input().split())
# mn/mx: smallest and largest candidate values computed from the two
# integers n and m read above; presumably the range of possible answers
# for AtCoder ABC137 B "One Clue" -- TODO confirm against the statement.
mn=m-n+1
mx=m+n-1
# Print every integer from mn to mx inclusive, space-separated on one line.
for i in range(mn,mx+1):
    print("%d "%(i),end="")
464578cefc05a084c570e4f00f39a7a235d58d7e | Python | oliveirajonathas/python_estudos | /pacote-download/pythonProject/exercicios_python_guanabara/ex005.py | UTF-8 | 209 | 3.859375 | 4 | [
"MIT"
] | permissive | numero = int(input('Digite um número inteiro: '))
# Predecessor and successor of the integer read above.
antecessor = numero - 1
sucessor = numero + 1
# Report both values (user-facing message kept in Portuguese, as written).
print('O antecessor e o sucessor de {} são, respectivamente, {} e {}.'.format(numero, antecessor, sucessor))
| true |
7e775e054cbbb6aa1810473d6191d282ce6d6b52 | Python | priyansh19/pytype | /pytype/tests/test_errors.py | UTF-8 | 34,937 | 2.75 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | """Tests for displaying errors."""
from pytype import file_utils
from pytype.tests import test_base
class ErrorTest(test_base.TargetIndependentTest):
"""Tests for errors."""
def testDeduplicate(self):
_, errors = self.InferWithErrors("""\
def f(x):
y = 42
y.foobar
f(3)
f(4)
""")
self.assertErrorLogIs(errors, [(3, "attribute-error", r"'foobar' on int$")])
def testUnknownGlobal(self):
_, errors = self.InferWithErrors("""
def f():
return foobar()
""")
self.assertErrorLogIs(errors, [(3, "name-error", r"foobar")])
def testInvalidAttribute(self):
ty, errors = self.InferWithErrors("""\
class A(object):
pass
def f():
(3).parrot
return "foo"
""")
self.assertTypesMatchPytd(ty, """
class A(object):
pass
def f() -> str
""")
self.assertErrorLogIs(errors, [(4, "attribute-error", r"parrot.*int")])
def testImportError(self):
_, errors = self.InferWithErrors("""\
import rumplestiltskin
""")
self.assertErrorLogIs(errors, [(1, "import-error", r"rumplestiltskin")])
def testImportFromError(self):
_, errors = self.InferWithErrors("""\
from sys import foobar
""")
self.assertErrorLogIs(errors, [(1, "import-error", r"sys\.foobar")])
def testNameError(self):
_, errors = self.InferWithErrors("""\
foobar
""")
self.assertErrorLogIs(errors, [(1, "name-error", r"foobar")])
def testWrongArgCount(self):
_, errors = self.InferWithErrors("""\
hex(1, 2, 3, 4)
""")
self.assertErrorLogIs(errors, [(1, "wrong-arg-count", r"expects 1.*got 4")])
def testWrongArgTypes(self):
_, errors = self.InferWithErrors("""\
hex(3j)
""")
self.assertErrorLogIs(errors, [(1, "wrong-arg-types", r"int.*complex")])
def testInterpreterFunctionNameInMsg(self):
_, errors = self.InferWithErrors("""\
class A(list): pass
A.append(3)
""")
self.assertErrorLogIs(
errors,
[(2, "missing-parameter", r"function list.append")]
)
def testPyTDFunctionNameInMsg(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", "class A(list): pass")
_, errors = self.InferWithErrors("""\
import foo
foo.A.append(3)
""", pythonpath=[d.path])
self.assertErrorLogIs(
errors,
[(2, "missing-parameter", r"function list.append")]
)
def testBuiltinFunctionNameInMsg(self):
_, errors = self.InferWithErrors("""\
x = list
x += (1,2)
""")
self.assertErrorLogIs(
errors,
[(2, "missing-parameter", r"function list.__iadd__")]
)
def testRewriteBuiltinFunctionName(self):
"""Should rewrite `function __builtin__.len` to `built-in function len`."""
_, errors = self.InferWithErrors("x = len(None)")
self.assertErrorLogIs(
errors,
[(1, "wrong-arg-types", r"Built-in function len")]
)
  # NOTE(review): missing the "test" prefix, so the unittest runner never
  # collects or runs this method -- it is dead code today. Rename it to
  # testBoundMethodNameInMsg once the expected regex is verified (it uses a
  # capitalized "Function", unlike "function list.append" asserted above),
  # since this assertion has never actually executed.
  def BoundMethodNameInMsg(self):
    _, errors = self.InferWithErrors("""\
      "".join(1)
      """)
    self.assertErrorLogIs(
        errors,
        [(1, "missing-parameter", r"Function str.join")]
    )
def testPrettyPrintWrongArgs(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f(a: int, b: int, c: int, d: int, e: int): ...
""")
_, errors = self.InferWithErrors("""\
import foo
foo.f(1, 2, 3, "four", 5)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [
(2, "wrong-arg-types", ("a, b, c, d: int, [.][.][.].*"
"a, b, c, d: str, [.][.][.]"))])
def testInvalidBaseClass(self):
_, errors = self.InferWithErrors("""\
class Foo(3):
pass
""")
self.assertErrorLogIs(errors, [(1, "base-class-error")])
def testInvalidIteratorFromImport(self):
with file_utils.Tempdir() as d:
d.create_file("mod.pyi", """
class Codec(object):
def __init__(self) -> None: ...
""")
_, errors = self.InferWithErrors("""
import mod
def f():
for row in mod.Codec():
pass
""", pythonpath=[d.path])
error = r"No attribute.*__iter__.*on mod\.Codec"
self.assertErrorLogIs(errors, [(4, "attribute-error", error)])
def testInvalidIteratorFromClass(self):
_, errors = self.InferWithErrors("""\
class A(object):
pass
def f():
for row in A():
pass
""")
self.assertErrorLogIs(errors, [(4, "attribute-error", r"__iter__.*A")])
def testIterOnModule(self):
errors = self.CheckWithErrors("""\
import sys
for _ in sys:
pass
""")
self.assertErrorLogIs(
errors, [(2, "module-attr", r"__iter__.*module 'sys'")])
def testInheritFromGeneric(self):
with file_utils.Tempdir() as d:
d.create_file("mod.pyi", """
from typing import Generic, TypeVar
T = TypeVar("T")
class Foo(Generic[T]): ...
class Bar(Foo[int]): ...
""")
_, errors = self.InferWithErrors("""\
import mod
chr(mod.Bar())
""", pythonpath=[d.path])
# "Line 3, in f: Can't retrieve item out of dict. Empty?"
self.assertErrorLogIs(errors, [(2, "wrong-arg-types", r"int.*mod\.Bar")])
def testWrongKeywordArg(self):
with file_utils.Tempdir() as d:
d.create_file("mycgi.pyi", """
def escape(x: str or int) -> str or int
""")
_, errors = self.InferWithErrors("""\
import mycgi
def foo(s):
return mycgi.escape(s, quote=1)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(3, "wrong-keyword-args",
r"quote.*mycgi\.escape")])
def testMissingParameter(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
def bar(xray, yankee, zulu) -> str
""")
_, errors = self.InferWithErrors("""\
import foo
foo.bar(1, 2)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "missing-parameter",
r"zulu.*foo\.bar")])
def testBadInheritance(self):
_, errors = self.InferWithErrors("""\
class X:
pass
class Bar(X):
pass
class Baz(X, Bar):
pass
""")
self.assertErrorLogIs(errors, [(5, "mro-error")])
def testBadCall(self):
with file_utils.Tempdir() as d:
d.create_file("other.pyi", """
def foo(x: int, y: str) -> str: ...
""")
_, errors = self.InferWithErrors("""\
import other
other.foo(1.2, [])
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [
(2, "wrong-arg-types", r"\(x: int")])
def testCallUncallable(self):
_, errors = self.InferWithErrors("""\
0()
""")
self.assertErrorLogIs(errors, [(1, "not-callable", r"int")])
def testSuperError(self):
_, errors = self.InferWithErrors("""\
class A(object):
def __init__(self):
super(A, self, "foo").__init__()
""")
self.assertErrorLogIs(errors, [(3, "wrong-arg-count", "2.*3")])
def testAttributeError(self):
with file_utils.Tempdir() as d:
d.create_file("modfoo.pyi", "")
_, errors = self.InferWithErrors("""\
class Foo(object):
def __getattr__(self, name):
return "attr"
def f():
return Foo.foo # line 5
def g(x):
if x:
y = None
else:
y = 1
return y.bar # line 11
def h():
return Foo().foo # No error
import modfoo
modfoo.baz # line 15
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [
(5, "attribute-error", r"No attribute 'foo' on Type\[Foo\]"),
(11, "attribute-error",
r"No attribute 'bar' on None\nIn Optional\[int\]"),
(11, "attribute-error",
r"No attribute 'bar' on int\nIn Optional\[int\]"),
(15, "module-attr",
"No attribute 'baz' on module 'modfoo'")])
def testAttributeErrorGetAttribute(self):
_, errors = self.InferWithErrors("""\
class Foo(object):
def __getattribute__(self, name):
return "attr"
def f():
return Foo().x # There should be no error on this line.
def g():
return Foo.x
""")
self.assertErrorLogIs(errors, [(7, "attribute-error", r"x")])
def testNoneAttribute(self):
_, errors = self.InferWithErrors("""\
None.foo
""")
self.assertErrorLogIs(errors, [(1, "attribute-error", r"foo")])
def testPyiType(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f(x: list[int]) -> int: ...
""")
_, errors = self.InferWithErrors("""\
import foo
foo.f([""])
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-arg-types",
r"List\[int\].*List\[str\]")])
def testTooManyArgs(self):
_, errors = self.InferWithErrors("""\
def f():
pass
f(3)
""", deep=True)
self.assertErrorLogIs(errors, [(3, "wrong-arg-count", r"0.*1")])
def testTooFewArgs(self):
_, errors = self.InferWithErrors("""\
def f(x):
pass
f()
""", deep=True)
self.assertErrorLogIs(errors, [(3, "missing-parameter", r"x.*f")])
def testDuplicateKeyword(self):
_, errors = self.InferWithErrors("""\
def f(x, y):
pass
f(3, x=3)
""", deep=True)
self.assertErrorLogIs(errors, [(3, "duplicate-keyword-argument", r"f.*x")])
def testBadImport(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
def f() -> int: ...
class f: ...
""")
_, errors = self.InferWithErrors("""\
import a
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error")])
def testBadImportDependency(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from b import X
class Y(X): ...
""")
_, errors = self.InferWithErrors("""\
import a
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error")])
def testBadImportFrom(self):
with file_utils.Tempdir() as d:
d.create_file("foo/a.pyi", """
def f() -> int: ...
class f: ...
""")
d.create_file("foo/__init__.pyi", "")
_, errors = self.InferWithErrors("""\
from foo import a
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"foo\.a")])
def testBadImportFromDependency(self):
with file_utils.Tempdir() as d:
d.create_file("foo/a.pyi", """
from a import X
class Y(X): ...
""")
d.create_file("foo/__init__.pyi", "")
_, errors = self.InferWithErrors("""\
from foo import a
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"foo\.a")])
def testBadContainer(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import SupportsInt
class A(SupportsInt[int]): pass
""")
_, errors = self.InferWithErrors("""\
import a
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error",
r"SupportsInt is not a container")])
def testBadTypeParameterOrder(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Generic, TypeVar
K = TypeVar("K")
V = TypeVar("V")
class A(Generic[K, V]): pass
class B(Generic[K, V]): pass
class C(A[K, V], B[V, K]): pass
""")
_, errors = self.InferWithErrors("""\
import a
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"Illegal.*order.*a\.C")])
def testDuplicateTypeParameter(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Generic, TypeVar
T = TypeVar("T")
class A(Generic[T, T]): pass
""")
_, errors = self.InferWithErrors("""\
import a
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"T")])
def testDuplicateGenericBaseClass(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Generic, TypeVar
T = TypeVar("T")
V = TypeVar("V")
class A(Generic[T], Generic[V]): pass
""")
_, errors = self.InferWithErrors("""\
import a
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"inherit.*Generic")])
def testTypeParameterInModuleConstant(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar
T = TypeVar("T")
x = ... # type: T
""")
_, errors = self.InferWithErrors("""\
import a
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"a.*T.*a\.x")])
def testTypeParameterInClassAttribute(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Generic, TypeVar
T = TypeVar("T")
class A(Generic[T]):
x = ... # type: T
""")
_, errors = self.InferWithErrors("""\
import a
def f():
return a.A.x
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(3, "unbound-type-param", r"x.*A.*T")])
def testUnboundTypeParameterInInstanceAttribute(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import TypeVar
T = TypeVar("T")
class A(object):
x = ... # type: T
""")
_, errors = self.InferWithErrors("""\
import a
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"a.*T.*a\.A\.x")])
def testPrintUnionArg(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
def f(x: int or str) -> None
""")
_, errors = self.InferWithErrors("""\
import a
x = a.f(4.2)
""", deep=True, pythonpath=[d.path])
pattern = r"Expected.*Union\[int, str\].*Actually passed"
self.assertErrorLogIs(errors, [(2, "wrong-arg-types", pattern)])
def testPrintTypeArg(self):
_, errors = self.InferWithErrors("""\
hex(int)
""", deep=True)
self.assertErrorLogIs(
errors, [(1, "wrong-arg-types", r"Actually passed.*Type\[int\]")])
def testDeleteFromSet(self):
_, errors = self.InferWithErrors("""\
s = {1}
del s[1]
""", deep=True)
self.assertErrorLogIs(
errors, [(2, "unsupported-operands", r"item deletion")])
def testBadReference(self):
ty, errors = self.InferWithErrors("""\
def main():
x = foo
for foo in []:
pass
return x
""", deep=True)
self.assertErrorLogIs(errors, [(2, "name-error", r"foo")])
# Make sure we recovered from the error and got the right return type
self.assertTypesMatchPytd(ty, """
from typing import Any
def main() -> Any
""")
def testSetIntAttribute(self):
_, errors = self.InferWithErrors("""\
x = 42
x.y = 42
""", deep=True)
self.assertErrorLogIs(errors, [(2, "not-writable", r"y.*int")])
def testInvalidParametersOnMethod(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
class A(object):
def __init__(self, x: int) -> None
""")
_, errors = self.InferWithErrors("""\
import a
x = a.A("")
x = a.A("", 42)
x = a.A(42, y="")
x = a.A(42, x=42)
x = a.A()
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-arg-types", r"A\.__init__"),
(3, "wrong-arg-count", r"A\.__init__"),
(4, "wrong-keyword-args", r"A\.__init__"),
(5, "duplicate-keyword-argument",
r"A\.__init__"),
(6, "missing-parameter", r"A\.__init__")])
def testDuplicateKeywords(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f(x, *args, y) -> None
""")
_, errors = self.InferWithErrors("""\
import foo
foo.f(1, y=2)
foo.f(1, 2, y=3)
foo.f(1, x=1)
# foo.f(y=1, y=2) # caught by compiler
""", deep=True, pythonpath=[d.path])
self.assertErrorLogIs(errors, [
(4, "duplicate-keyword-argument"),
])
def testInvalidParametersDetails(self):
_, errors = self.InferWithErrors("""\
float(list())
float(1, list(), foobar=str)
float(1, foobar=list())
float(1, x="")
hex()
""")
self.assertErrorLogIs(errors, [
(1, "wrong-arg-types",
r"Actually passed:.*self, x: List\[nothing\]"),
(2, "wrong-arg-count", r"Actually passed:.*self, x, "
r"_, foobar"),
(3, "wrong-keyword-args",
r"Actually passed:.*self, x, foobar"),
(4, "duplicate-keyword-argument",
r"Actually passed:.*self, x, x"),
(5, "missing-parameter", r"Actually passed: \(\)")
])
def testBadSuperClass(self):
_, errors = self.InferWithErrors("""\
class A(object):
def f(self):
return "foo"
class B(A):
def f(self):
return super(self, B).f() # should be super(B, self)
""", deep=True)
self.assertErrorLogIs(errors, [
(7, "wrong-arg-types", r"cls: type.*cls: B")])
@test_base.skip("Need to type-check second argument to super")
def testBadSuperInstance(self):
_, errors = self.InferWithErrors("""\
class A(object):
pass
class B(A):
def __init__(self):
super(B, A).__init__() # A cannot be the second argument to super
""", deep=True)
self.assertErrorLogIs(
errors, [(5, "wrong-arg-types", r"Type\[B\].*Type\[A\]")])
def testBadNameImport(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
import typing
x = ... # type: typing.Rumpelstiltskin
""")
_, errors = self.InferWithErrors("""\
import a
x = a.x
""", pythonpath=[d.path], deep=True)
self.assertErrorLogIs(errors, [(1, "pyi-error", r"Rumpelstiltskin")])
def testBadNameImportFrom(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Rumpelstiltskin
x = ... # type: Rumpelstiltskin
""")
_, errors = self.InferWithErrors("""\
import a
x = a.x
""", pythonpath=[d.path], deep=True)
self.assertErrorLogIs(errors, [(1, "pyi-error", r"Rumpelstiltskin")])
def testMatchType(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Type
class A(object): ...
class B(A): ...
class C(object): ...
def f(x: Type[A]) -> bool
""")
ty, errors = self.InferWithErrors("""\
import a
x = a.f(a.A)
y = a.f(a.B)
z = a.f(a.C)
""", pythonpath=[d.path], deep=True)
error = r"Expected.*Type\[a\.A\].*Actual.*Type\[a\.C\]"
self.assertErrorLogIs(errors, [(4, "wrong-arg-types", error)])
self.assertTypesMatchPytd(ty, """
from typing import Any
a = ... # type: module
x = ... # type: bool
y = ... # type: bool
z = ... # type: Any
""")
def testMatchParameterizedType(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Generic, Type, TypeVar
T = TypeVar("T")
class A(Generic[T]): ...
class B(A[str]): ...
def f(x: Type[A[int]]): ...
""")
_, errors = self.InferWithErrors("""\
import a
x = a.f(a.B)
""", pythonpath=[d.path], deep=True)
expected_error = r"Expected.*Type\[a\.A\[int\]\].*Actual.*Type\[a\.B\]"
self.assertErrorLogIs(errors, [(2, "wrong-arg-types", expected_error)])
def testMROError(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
class A(object): ...
class B(object): ...
class C(A, B): ...
class D(B, A): ...
class E(C, D): ...
""")
_, errors = self.InferWithErrors("""\
import a
x = a.E()
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "mro-error", r"E")])
def testBadMRO(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
class A(BaseException, ValueError): ...
""")
_, errors = self.InferWithErrors("""\
import a
class B(a.A): pass
raise a.A()
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "mro-error", r"A")])
def testUnsolvableAsMetaclass(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Any
def __getattr__(name) -> Any
""")
d.create_file("b.pyi", """
from a import A
class B(metaclass=A): ...
""")
_, errors = self.InferWithErrors("""\
import b
class C(b.B):
def __init__(self):
f = open(self.x, 'r')
""", pythonpath=[d.path], deep=True)
self.assertErrorLogIs(errors, [(4, "attribute-error", r"x.*C")])
def testDontTimeoutOnComplex(self):
# Tests that we can solve a complex file without timing out.
# Useful for catching large performance regressions.
ty = self.Infer("""\
if __random__:
x = [1]
else:
x = [1j]
x = x + x
x = x + x
x = x + x
x = x + x
x = x + x
x = x + x
x = x + x
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import Any
x = ... # type: Any
""")
def testFailedFunctionCall(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
def f(x: str, y: int) -> bool
def f(x: str) -> bool
""")
_, errors = self.InferWithErrors("""\
import a
x = a.f(0, "")
""", pythonpath=[d.path])
# Tests that [wrong-arg-types] rather than [wrong-arg-count] is reported
self.assertErrorLogIs(errors, [(2, "wrong-arg-types", r"")])
def testNoncomputableMethod(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
T = TypeVar("T")
def copy(x: T) -> T
""")
_, errors = self.InferWithErrors("""\
import a
class A(object):
def __getattribute__(self, name):
return a.copy(self)
x = A()()
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(5, "not-callable", r"A")])
def testBadTypeName(self):
_, errors = self.InferWithErrors("""\
X = type(3, (int, object), {"a": 1})
""")
self.assertErrorLogIs(errors, [(1, "wrong-arg-types", r"Actual.*int")])
def testBadTypeBases(self):
_, errors = self.InferWithErrors("""\
X = type("X", (42,), {"a": 1})
""")
self.assertErrorLogIs(errors, [(1, "wrong-arg-types",
r"Actual.*Tuple\[int\]")])
def testHalfBadTypeBases(self):
_, errors = self.InferWithErrors("""\
X = type("X", (42, object), {"a": 1})
""")
self.assertErrorLogIs(errors, [(1, "wrong-arg-types",
r"Actual.*Tuple\[int, Type\[object\]\]")])
def testBadTypeMembers(self):
_, errors = self.InferWithErrors("""\
X = type("X", (int, object), {0: 1})
""")
self.assertErrorLogIs(errors, [(1, "wrong-arg-types",
r"Actual.*Dict\[int, int\]")])
def testRecursion(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
class A(B): ...
class B(A): ...
""")
ty, errors = self.InferWithErrors("""\
import a
v = a.A()
x = v.x # No error because there is an Unsolvable in the MRO of a.A
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
a = ... # type: module
v = ... # type: a.A
x = ... # type: Any
""")
self.assertErrorLogIs(errors, [(2, "recursion-error", r"a\.A")])
def testEmptyUnionOrOptional(self):
with file_utils.Tempdir() as d:
d.create_file("f1.pyi", """\
def f(x: Union): ...
""")
d.create_file("f2.pyi", """\
def f(x: Optional): ...
""")
_, errors = self.InferWithErrors("""\
import f1
import f2
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"f1.*Union"),
(2, "pyi-error", r"f2.*Optional")])
def testBadDictAttribute(self):
_, errors = self.InferWithErrors("""\
x = {"a": 1}
y = x.a
""")
self.assertErrorLogIs(errors, [(2, "attribute-error",
r"a.*Dict\[str, int\]")])
def testBadPyiDict(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Dict
x = ... # type: Dict[str, int, float]
""")
_, errors = self.InferWithErrors("""\
import a
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(1, "pyi-error", r"2.*3")])
def testCallNone(self):
_, errors = self.InferWithErrors("""\
None()
""")
self.assertErrorLogIs(errors, [(1, "not-callable")])
def testInNone(self):
_, errors = self.InferWithErrors("""\
3 in None
""")
self.assertErrorLogIs(errors, [(1, "unsupported-operands")])
def testNoAttrError(self):
_, errors = self.InferWithErrors("""\
if __random__:
y = 42
else:
y = "foo"
y.upper
""")
self.assertErrorLogIs(errors, [(5, "attribute-error")])
def testAttrError(self):
_, errors = self.InferWithErrors("""\
if __random__:
y = 42
else:
y = "foo"
y.upper
""")
self.assertErrorLogIs(errors, [(5, "attribute-error", "upper.*int")])
def testPrintCallableInstance(self):
_, errors = self.InferWithErrors("""\
from typing import Callable
v = None # type: Callable[[int], str]
hex(v)
""")
self.assertErrorLogIs(errors, [(3, "wrong-arg-types",
r"Actual.*Callable\[\[int\], str\]")])
def testSameNameAndLine(self):
_, errors = self.InferWithErrors("""\
def f(x):
return x + 42
f("hello")
f([])
""")
self.assertErrorLogIs(errors, [(2, "unsupported-operands", r"str.*int"),
(2, "unsupported-operands", r"List.*int")])
def testKwargOrder(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f(*args, y, x, z: int): ...
def g(x): ...
""")
_, errors = self.InferWithErrors("""\
import foo
foo.f(x=1, y=2, z="3")
foo.g(42, v4="the", v3="quick", v2="brown", v1="fox")
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [
(2, "wrong-arg-types", r"x, y, z.*x, y, z"),
(3, "wrong-keyword-args", r"v1, v2, v3, v4")])
def testBadBaseClass(self):
_, errors = self.InferWithErrors("""\
class Foo(None): pass
class Bar(None if __random__ else 42): pass
""")
self.assertErrorLogIs(errors, [
(1, "base-class-error", r"Invalid base class: None"),
(2, "base-class-error", r"Optional\[<instance of int>\]")])
def testCallableInUnsupportedOperands(self):
_, errors = self.InferWithErrors("""\
def f(x, y=None): pass
f in f
""")
self.assertErrorLogIs(errors, [(2, "unsupported-operands",
r"Callable\[\[Any, Any\], Any\].*"
r"Callable\[\[Any, Any\], Any\]")])
def testCleanPyiNamedtupleNames(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import NamedTuple
X = NamedTuple("X", [])
def f(x: int): ...
""")
_, errors = self.InferWithErrors("""\
import foo
foo.f(foo.X())
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-arg-types", r"`X`")])
def testBadAnnotation(self):
_, errors = self.InferWithErrors("""\
tuple[0]
dict[1, 2]
class A(object): pass
A[3]
""")
self.assertErrorLogIs(errors, [
(1, "not-indexable", r"class tuple"),
(2, "invalid-annotation", r"1.*Not a type"),
(2, "invalid-annotation", r"2.*Not a type"),
(4, "not-indexable", r"class A"),
])
def testRevealType(self):
_, errors = self.InferWithErrors("""\
reveal_type(42 or "foo")
class Foo(object):
pass
reveal_type(Foo)
reveal_type(Foo())
reveal_type([1,2,3])
""")
self.assertErrorLogIs(errors, [
(1, "reveal-type", r"^Union\[int, str\]$"),
(4, "reveal-type", r"^Type\[Foo\]$"),
(5, "reveal-type", r"^Foo$"),
(6, "reveal-type", r"^List\[int\]$"),
])
def testNotProtocol(self):
_, errors = self.InferWithErrors("""\
a = []
a.append(1)
a = "".join(a)
""")
self.assertErrorLogIs(errors, [(
3, "wrong-arg-types", r"\(.*List\[int\]\)$")]) # no protocol details
def testHiddenError(self):
errors = self.CheckWithErrors("""\
use_option = False
def f():
if use_option:
name_error
""")
self.assertErrorLogIs(errors, [(4, "name-error")])
def testUnknownInError(self):
errors = self.CheckWithErrors("""\
def f(x):
y = x if __random__ else None
return y.groups()
""")
self.assertErrorLogIs(errors, [(3, "attribute-error", r"Optional\[Any\]")])
class OperationsTest(test_base.TargetIndependentTest):
"""Test operations."""
def testXor(self):
errors = self.CheckWithErrors("def f(): return 'foo' ^ 3")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\^.*str.*int.*'__xor__' on str.*'__rxor__' on int")])
def testAdd(self):
errors = self.CheckWithErrors("def f(): return 'foo' + 3")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands", r"\+.*str.*int.*__add__ on str.*str")])
def testInvert(self):
errors = self.CheckWithErrors("def f(): return ~None")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands", r"\~.*None.*'__invert__' on None")])
def testSub(self):
errors = self.CheckWithErrors("def f(): return 'foo' - 3")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\-.*str.*int.*'__sub__' on str.*'__rsub__' on int")])
def testMul(self):
errors = self.CheckWithErrors("def f(): return 'foo' * None")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands", r"\*.*str.*None.*__mul__ on str.*int")])
def testDiv(self):
errors = self.CheckWithErrors("def f(): return 'foo' / 3")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\/.*str.*int.*'__(true)?div__' on str.*'__r(true)?div__' on int")])
def testMod(self):
errors = self.CheckWithErrors("def f(): return None % 3")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands", r"\%.*None.*int.*'__mod__' on None")])
def testLShift(self):
errors = self.CheckWithErrors("def f(): return 3 << None")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\<\<.*int.*None.*__lshift__ on int.*int")])
def testRShift(self):
errors = self.CheckWithErrors("def f(): return 3 >> None")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\>\>.*int.*None.*__rshift__ on int.*int")])
def testAnd(self):
errors = self.CheckWithErrors("def f(): return 'foo' & 3")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\&.*str.*int.*'__and__' on str.*'__rand__' on int")])
def testOr(self):
errors = self.CheckWithErrors("def f(): return 'foo' | 3")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\|.*str.*int.*'__or__' on str.*'__ror__' on int")])
def testFloorDiv(self):
errors = self.CheckWithErrors("def f(): return 3 // 'foo'")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands",
r"\/\/.*int.*str.*__floordiv__ on int.*int")])
def testPow(self):
errors = self.CheckWithErrors("def f(): return 3 ** 'foo'")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands", r"\*\*.*int.*str.*__pow__ on int.*int")])
def testNeg(self):
errors = self.CheckWithErrors("def f(): return -None")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands", r"\-.*None.*'__neg__' on None")])
def testPos(self):
errors = self.CheckWithErrors("def f(): return +None")
self.assertErrorLogIs(errors, [
(1, "unsupported-operands", r"\+.*None.*'__pos__' on None")])
class InPlaceOperationsTest(test_base.TargetIndependentTest):
  """Test in-place operations."""
  def testIAdd(self):
    # `list += int` must be flagged, and the message must name both the
    # failing __iadd__ on List and the Iterable type it expected.
    errors = self.CheckWithErrors("def f(): v = []; v += 3")
    self.assertErrorLogIs(errors, [
        (1, "unsupported-operands",
         r"\+\=.*List.*int.*__iadd__ on List.*Iterable")])
class NoSymbolOperationsTest(test_base.TargetIndependentTest):
  """Test operations with no native symbol."""
  def testGetItem(self):
    # v['foo'] on a list: message says "item retrieval" (no operator symbol)
    # and names __getitem__ plus the expected index type int.
    errors = self.CheckWithErrors("def f(): v = []; return v['foo']")
    self.assertErrorLogIs(errors, [
        (1, "unsupported-operands",
         r"item retrieval.*List.*str.*__getitem__ on List.*int")])
  def testDelItem(self):
    # del d[3] on a Dict[str, int]: "item deletion" message must name
    # __delitem__ and the expected key type str.
    errors = self.CheckWithErrors("def f(): v = {'foo': 3}; del v[3]")
    d = r"Dict\[str, int\]"
    self.assertErrorLogIs(errors, [
        (1, "unsupported-operands",
         r"item deletion.*{d}.*int.*__delitem__ on {d}.*str".format(d=d))])
  def testSetItem(self):
    # v['foo'] = 3 on a list: "item assignment" message with __setitem__.
    errors = self.CheckWithErrors("def f(): v = []; v['foo'] = 3")
    self.assertErrorLogIs(errors, [
        (1, "unsupported-operands",
         r"item assignment.*List.*str.*__setitem__ on List.*int")])
  def testContains(self):
    # 'foo' in 3: reported as 'in' with a missing '__contains__' on int.
    errors = self.CheckWithErrors("def f(): return 'foo' in 3")
    self.assertErrorLogIs(errors, [
        (1, "unsupported-operands", r"'in'.*int.*str.*'__contains__' on int")])
test_base.main(globals(), __name__ == "__main__")
| true |
1599db72572c54a999eb97ebf4d5f09604868ef6 | Python | altareen/csp | /10Files/stockprices.py | UTF-8 | 370 | 3.296875 | 3 | [] | no_license | # analysing the stock price of baidu.com, ticker symbol: BIDU
# Average the closing prices (second-to-last CSV column) over all data rows.
total = 0.0
num = 0
# Fix: use a context manager so the file handle is always closed; the
# original opened "bidu.csv" and never closed it.
with open("bidu.csv") as fhand:
    fhand.readline()  # discard the first line of column labels
    for line in fhand:
        fields = line.rstrip().split(",")
        price = float(fields[-2])  # closing price column
        total += price
        num += 1
# Assumes at least one data row; otherwise this raises ZeroDivisionError,
# matching the original behavior.
average = total/num
print("average closing price = " + str(average))
| true |
fb463d8f64c18b8a634aa3692ed1c07fd5d7ca42 | Python | rodrigolins92/exercicios-diversos | /letras_sao_iguais.py | UTF-8 | 268 | 3.96875 | 4 | [
"Apache-2.0"
] | permissive | def SaoIguais(a, b, c):
if (a == b) and (b == c):
return print("São Iguais")
else:
return print("São diferentes")
# Read three letters from the user and report whether all three are equal.
x1 = input("Primeira letra: ")
x2 = input("Segunda letra: ")
x3 = input("Terceira letra: ")
# NOTE(review): SaoIguais prints its verdict and returns print()'s result,
# so `resposta` is always None -- consider returning a bool instead.
resposta = SaoIguais(x1, x2, x3)
| true |
30570d3eeaeebfd4f82b0b092efe909dd0ba6e5a | Python | Shatki/easydoc | /users/models.py | UTF-8 | 3,881 | 2.515625 | 3 | [] | no_license | from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
from easydoc.validators import phone, alpha_all, login, email
# Класс менеджера должен переопределить методы create_user() и create_superuser().
class UserManager(BaseUserManager):
    """Manager for the custom User model.

    A custom user model must override create_user() and create_superuser().
    """

    def create_user(self, username, name, password=None, **kwargs):
        """Create and save a regular (non-admin) user.

        Raises ValueError when the username or the name is missing.
        Additional fields (e.g. ``email``) may be given as keyword arguments.
        """
        if not username:
            raise ValueError('имя логина обязательно')
        if not name:
            raise ValueError('имя пользователя обязательно')
        user = self.model(username=username)
        if kwargs.get('email'):
            user.email = kwargs.get('email')
        # BUG FIX: honour the explicit positional ``name`` argument; previously
        # it was only picked up when duplicated in **kwargs and otherwise dropped.
        user.name = name
        user.set_password(password)
        user.is_admin = False
        user.save(using=self._db)
        return user

    def create_superuser(self, username, password, **kwargs):
        """
        Used for: python manage.py createsuperuser
        """
        # BUG FIX: the previous call create_user(username, password, **kwargs)
        # bound the password to the ``name`` parameter, so the real password was
        # never set (set_password(None)).  Pass each argument explicitly.
        user = self.create_user(username, 'admin', password, **kwargs)
        user.is_superuser = True
        user.is_admin = True
        user.save(using=self._db)
        return user
# Create your models here.
class User(AbstractUser):
    """Custom user model; authentication uses the ``username`` field."""

    class Meta:
        verbose_name = 'пользователь'
        verbose_name_plural = 'пользователи'
        db_table = 'users'
    # Login name used to sign in to the system
    username = models.CharField(verbose_name=u'логин для входа в систему', unique=True, max_length=30, db_index=True,
                                validators=[login])
    # Display name (pseudonym) shown for the user inside the system
    pseudonym = models.CharField(verbose_name=u'псевдоним пользователя в системе', unique=True, max_length=30,
                                 db_index=True,
                                 validators=[alpha_all])
    # E-mail address; NOTE(review): the comment in the original said login was
    # meant to happen via e-mail, but USERNAME_FIELD below is 'username'.
    # Also the field name shadows the imported ``email`` validator it uses.
    email = models.EmailField(verbose_name=u'электронная почта', unique=True, max_length=255, validators=[email])
    # First name - optional
    first_name = models.CharField(verbose_name=u'имя пользователя', max_length=40, blank=True, null=True)
    # Last name - also optional
    last_name = models.CharField(verbose_name=u'фамилия пользователя', max_length=40, blank=True, null=True)
    # Slogan / status line - a social-network style tag line
    tag_line = models.CharField(verbose_name=u'статус', max_length=140, blank=True, null=True)
    photo = models.ImageField(verbose_name=u'аватар', blank=True, null=True,
                              default='defaultprofileimage.jpg')
    phone = models.CharField(verbose_name=u'контактный телефон', max_length=12, validators=[phone], null=True)
    # Superuser/admin flag
    is_admin = models.BooleanField(default=False, null=False)
    date_joined = models.DateTimeField(verbose_name=u'дата создания', auto_now_add=True)
    date_updated = models.DateTimeField(verbose_name=u'последнее обновление', auto_now=True)
    # Field used to log in
    USERNAME_FIELD = 'username'
    # Fields required when creating a user via createsuperuser
    REQUIRED_FIELDS = ['pseudonym', 'email', ]
    objects = UserManager()

    def __unicode__(self):
        return '%d: %s' % (self.id, self.pseudonym)

    def __str__(self):
        return self.email

    def get_full_name(self):
        # Full name assembled from first and last name; NOTE(review): both
        # fields are nullable, so ' '.join may raise TypeError on None.
        return ' '.join([self.first_name, self.last_name])

    def get_short_name(self):
        return self.first_name

    def has_perm(self, perm, obj=None):
        # Every user is granted every permission in this simple model.
        return True
| true |
832954655d8855600a5bd92b8438de1ad5470ebf | Python | willymonee/IFB104-News-Feed-Aggregator | /news aggregator/news_aggregator.py | UTF-8 | 38,036 | 3.140625 | 3 | [] | no_license |
#-----Assignment Description-----------------------------------------#
#
# News Feed Aggregator
#
# In this assignment you will combine your knowledge of HTMl/XML
# mark-up languages with your skills in Python scripting, pattern
# matching, and Graphical User Interface design to produce a useful
# application that allows the user to aggregate RSS news feeds.
# See the instruction sheet accompanying this file for full details.
#
#--------------------------------------------------------------------#
#-----Imported Functions---------------------------------------------#
#
# Below are various import statements for helpful functions. You
# should be able to complete this assignment using these
# functions only. Note that not all of these functions are
# needed to successfully complete this assignment.
#
# NB: You may NOT use any Python modules that need to be downloaded
# and installed separately, such as "Beautiful Soup" or "Pillow".
# Only modules that are part of a standard Python 3 installation may
# be used.
# The function for opening a web document given its URL.
# (You WILL need to use this function in your solution,
# either directly or via our "download" function.)
from urllib.request import urlopen
# Import the standard Tkinter functions. (You WILL need to use
# these functions in your solution. You may import other widgets
# from the Tkinter module provided they are ones that come bundled
# with a standard Python 3 implementation and don't have to
# be downloaded and installed separately.)
from tkinter import *
from tkinter import ttk
# Import a special Tkinter widget we used in our demo
# solution. (You do NOT need to use this particular widget
# in your solution. You may import other such widgets from the
# Tkinter module provided they are ones that come bundled
# with a standard Python 3 implementation and don't have to
# be downloaded and installed separately.)
from tkinter.scrolledtext import ScrolledText
from tkinter import scrolledtext
# Functions for finding all occurrences of a pattern
# defined via a regular expression, as well as
# the "multiline" and "dotall" flags. (You do NOT need to
# use these functions in your solution, because the problem
# can be solved with the string "find" function, but it will
# be difficult to produce a concise and robust solution
# without using regular expressions.)
from re import findall, finditer, MULTILINE, DOTALL
# Import the standard SQLite functions (just in case they're
# needed one day).
from sqlite3 import *
#
#--------------------------------------------------------------------#
#-----------------------------------------------------------
#
# A function to download and save a web document. If the
# attempted download fails, an error message is written to
# the shell window and the special value None is returned.
#
# Parameters:
# * url - The address of the web page you want to download.
# * target_filename - Name of the file to be saved (if any).
# * filename_extension - Extension for the target file, usually
# "html" for an HTML document or "xhtml" for an XML
# document or RSS Feed.
# * save_file - A file is saved only if this is True. WARNING:
# The function will silently overwrite the target file
# if it already exists!
# * char_set - The character set used by the web page, which is
# usually Unicode UTF-8, although some web pages use other
# character sets.
# * lying - If True the Python function will hide its identity
# from the web server. This can be used to prevent the
# server from blocking access to Python programs. However
# we do NOT encourage using this option as it is both
# unreliable and unethical!
# * got_the_message - Set this to True once you've absorbed the
# message about Internet ethics.
#
def download(url = 'http://www.wikipedia.org/',
             target_filename = 'download',
             filename_extension = 'xhtml',
             save_file = True,
             char_set = 'UTF-8',
             lying = True,
             got_the_message = False):
    """Download the web document at ``url`` and return its contents as a string.

    Optionally saves the document to ``target_filename.filename_extension``
    (silently overwriting any existing file).  Prints an error message and
    returns None if the download, decoding or file write fails.
    NOTE(review): ``lying`` defaults to True, which hides the client's true
    identity from the server - the surrounding comments call this unreliable
    and unethical; confirm the default is intended.
    """
    # Import the function for opening online documents and
    # the class for creating requests
    from urllib.request import urlopen, Request
    # Import an exception raised when a web server denies access
    # to a document
    from urllib.error import HTTPError
    # Open the web document for reading
    try:
        if lying:
            # Pretend to be something other than a Python
            # script (NOT RECOMMENDED!)
            request = Request(url)
            request.add_header('User-Agent', 'Mozilla/5.0')
            if not got_the_message:
                print("Warning - Request does not reveal client's true identity.")
                print("          This is both unreliable and unethical!")
                print("          Proceed at your own risk!\n")
        else:
            # Behave ethically
            request = url
        web_page = urlopen(request)
    except ValueError:
        print("Download error - Cannot find document at URL '" + url + "'\n")
        return None
    except HTTPError:
        print("Download error - Access denied to document at URL '" + url + "'\n")
        return None
    except Exception as message:
        print("Download error - Something went wrong when trying to download " + \
              "the document at URL '" + url + "'")
        print("Error message was:", message, "\n")
        return None
    # Read the contents as a character string
    try:
        web_page_contents = web_page.read().decode(char_set)
    except UnicodeDecodeError:
        print("Download error - Unable to decode document from URL '" + \
              url + "' as '" + char_set + "' characters\n")
        return None
    except Exception as message:
        print("Download error - Something went wrong when trying to decode " + \
              "the document from URL '" + url + "'")
        print("Error message was:", message, "\n")
        return None
    # Optionally write the contents to a local text file
    # (overwriting the file if it already exists!)
    if save_file:
        try:
            text_file = open(target_filename + '.' + filename_extension,
                             'w', encoding = char_set)
            text_file.write(web_page_contents)
            text_file.close()
        except Exception as message:
            print("Download error - Unable to write to file '" + \
                  target_filename + "'")
            print("Error message was:", message, "\n")
    # Return the downloaded document to the caller
    return web_page_contents
#
#--------------------------------------------------------------------#
#-----Student's Solution---------------------------------------------#
#
# Put your solution at the end of this file.
#
# ---- GUI construction: main window, fonts and the two feed columns -------
#Create a Tk window and give it a title
window = Tk()
window.title('Gaming & Anime News Aggregator')
#Set font styles used throughout the interface
title_font = ('TW Cen MT Condensed', 32)
heading_font = ('TW Cen MT Condensed', 24)
text_font = ('TW Cen MT Condensed', 20)
#Create label widget for the title
news_mixer_text = Label(window, text = 'Gaming & Anime News Mixer',
                        font = title_font, width = 10)
#Add decorative image to GUI (file must exist in the working directory)
anime_image = PhotoImage(file="gaminganime.png")
anime_image_label = Label(window, image = anime_image)
#Create Live News Feed Column (feeds fetched from the web at start-up)
live_news_feed = Canvas(window, bg = 'light yellow', height = 500)
live_news_heading = Label(live_news_feed, text = 'Live News Feeds',
                          font = heading_font, bg = 'light yellow')
rps_feed = Label(live_news_feed, text = 'Rock Paper Shotgun', font = text_font,
                 bg = 'light yellow')
# Each combobox lets the user pick how many stories (0-12) to take from a feed.
rps_select = ttk.Combobox(live_news_feed, values =
                          ['0', '1', '2', '3',
                           '4','5', '6', '7', '8', '9',
                           '10', '11', '12'], font = text_font,
                          state = 'readonly')
rps_select.current(0)#set default value as the 1st value
pcgamer_feed = Label(live_news_feed, text = 'PC Gamer', font = text_font,
                     bg = 'light yellow')
pcgamer_select = ttk.Combobox(live_news_feed, values =
                              ['0', '1', '2', '3', '4'
                               ,'5', '6', '7', '8', '9',
                               '10', '11', '12'], font = text_font,
                              state = 'readonly')
pcgamer_select.current(0)#set default value as the 1st value
#Create Past News Feed Column (feeds read from local archive files)
past_news_feed = Canvas(window, bg = 'azure')
past_news_heading = Label(past_news_feed, text = 'Past News Feeds',
                          font = heading_font, bg = 'azure')
crunchyroll_feed = Label(past_news_feed, text = 'Crunchyroll', font = text_font,
                         bg = 'azure')
crunchyroll_select = ttk.Combobox(past_news_feed, values = ['0', '1', '2', '3', '4'
                                  ,'5', '6', '7', '8', '9',
                                  '10', '11', '12'], font = text_font
                                  ,state = 'readonly')
crunchyroll_select.current(0)#set default value as the 1st value
kotaku_feed = Label(past_news_feed, text = 'Kotaku', font = text_font,
                    bg = 'azure')
kotaku_select = ttk.Combobox(past_news_feed, values = ['0', '1', '2', '3', '4'
                             ,'5', '6', '7', '8', '9',
                             '10', '11', '12'], font = text_font
                             ,state = 'readonly')
kotaku_select.current(0)#set default value as the 1st value
#Create title for stories selected section
stories_selected_title = Label(window, text = 'Stories Selected',
                               font = heading_font)
#Create stories selected output section
output_section = Frame(window)
#Create a text widget to display the results
# NOTE(review): the name ``text`` is very generic for a module-level global;
# it is written to by export() below.
text = scrolledtext.ScrolledText(master = output_section, wrap = 'word',
                                 width = 80, height = 11, font = text_font, state = 'normal')
#Open the past news feed files and read them, with exceptions if the file is not found
# ---- Feed loading: local archive files first, then the two live sites ----
# On any failure the matching combobox is recreated in the 'disabled' state so
# the user cannot select stories from an unavailable source.
try:
    crunchyroll = open('crunchyroll.xhtml', encoding="utf8")
    crunchyroll_text = crunchyroll.read()
    crunchyroll.close()
except FileNotFoundError:
    print('The crunchyroll.xhtml file cannot be found, select stories from other sources \
or add the file back into the directory folder')
    crunchyroll_select = ttk.Combobox(past_news_feed, values = ['0', '1', '2', '3', '4'
                                      ,'5', '6', '7', '8', '9',
                                      '10', '11', '12'], font = text_font
                                      ,state = 'disabled')
try:
    kotaku = open('kotaku.xml', encoding="utf8")
    kotaku_text = kotaku.read()
    kotaku.close()
except FileNotFoundError:
    print('The kotaku.xml file cannot be found, select stories from other sources or \
add the file back into the directory folder')
    kotaku_select = ttk.Combobox(past_news_feed, values = ['0', '1', '2', '3', '4'
                                 ,'5', '6', '7', '8', '9',
                                 '10', '11', '12'], font = text_font
                                 ,state = 'disabled')
#Open the live news feeds and read them with exceptions if the URL is invalid
try:
    rps = urlopen('https://www.rockpapershotgun.com/')
    rps_text = rps.read().decode("UTF-8")
    rps.close()
except OSError: #prints statement and disables the combobox
    print('Invalid URL is used when attempting to open Rock Paper Shotgun')
    rps_select = ttk.Combobox(live_news_feed, values = ['0', '1', '2', '3', '4'
                              ,'5', '6', '7', '8', '9',
                              '10', '11', '12'], font = text_font
                              ,state = 'disabled')
# NOTE(review): a bare except is very broad (it also swallows KeyboardInterrupt);
# an explicit Exception type would be safer.
except:
    print('Could not reach Rock Paper Shotgun, it may be down or updating')
    rps_select = ttk.Combobox(live_news_feed, values = ['0', '1', '2', '3', '4'
                              ,'5', '6', '7', '8', '9',
                              '10', '11', '12'], font = text_font
                              ,state = 'disabled')
try:
    pcgamer = urlopen('https://www.pcgamer.com/au/news/')
    pcgamer_text = pcgamer.read().decode("UTF-8")
    pcgamer.close()
except OSError:
    print('Invalid URL is used when attempting to open PC Gamer')
    pcgamer_select = ttk.Combobox(live_news_feed, values = ['0', '1', '2', '3', '4'
                                  ,'5', '6', '7', '8', '9',
                                  '10', '11', '12'], font = text_font
                                  ,state = 'disabled')
# NOTE(review): bare except, same caveat as above.
except:
    print('Could not reach PC Gamer, it may be down or updating')
    pcgamer_select = ttk.Combobox(live_news_feed, values = ['0', '1', '2', '3', '4'
                                  ,'5', '6', '7', '8', '9',
                                  '10', '11', '12'], font = text_font
                                  ,state = 'disabled')
##Create function when export button is pressed
def export():
    """Handler for the 'Export Selected' button.

    Reads how many stories the user picked from each of the four feeds,
    shows a headline/date summary in the GUI text widget, and writes the
    selected stories (headline, byline/author, image, text, sources) to
    'news.html'.  Relies on the module-level globals: the *_select
    comboboxes, the *_text feed contents, and the ``text``/``response``
    widgets.
    """
    #get value selected by user for the news feeds converting them from string to int data format
    amount_crunchyroll_selected = int(crunchyroll_select.get())
    amount_kotaku_selected = int(kotaku_select.get())
    amount_rps_selected = int(rps_select.get())
    amount_pcgamer_selected = int(pcgamer_select.get())
    amount_selected = amount_crunchyroll_selected + amount_kotaku_selected \
                      + amount_rps_selected + amount_pcgamer_selected
    # ---- Scrape the feeds with regular expressions.  The patterns are tied
    # to the current page/feed mark-up; if a site changes, the corresponding
    # findall simply returns an empty list and the IndexError handlers below
    # report it. ----
    #Find and display the titles, dates and source of stories selected
    #from Crunchyroll
    crunchyroll_headline = findall('<title>(.*)</title>', crunchyroll_text)
    crunchyroll_date = findall('<pubDate>(.*)</pubDate>', crunchyroll_text)
    #find byline and text
    crunchyroll_byline = findall('<description>(.*?)<br/><', crunchyroll_text)
    crunchyroll_paragraph = findall('/><br/><br/><p>(.*?)</description>', crunchyroll_text)
    crunchyroll_image = findall('<br/><img src="(.*)" /><br/><br/><p>', crunchyroll_text)
    #Find and display the titles, dates and source of stories selected
    #from Kotaku
    kotaku_headline = findall('<title>(.*)</title>', kotaku_text)
    kotaku_date = findall('<pubDate>(.*)</pubDate>', kotaku_text)
    kotaku_image = findall('<img src="(.*)" />', kotaku_text)
    kotaku_paragraph = findall('<description>\n*.* /><p>(.*?)</p>', kotaku_text)
    kotaku_author = findall('<dc:creator>\n*-<!\[CDATA\[(.*)\]\]>', kotaku_text)
    #Find and display the titles, dates and source of stories selected from
    #rockpaper shotgun
    rps_headline = findall('<p class="title">\n*\s*<a href=".*">(.*)</a>\n*\s*</p>', rps_text)
    rps_date = findall('<span>•</span>\n*\s*(.*?)<', rps_text)
    rps_author = findall('rel="author">(.*)</a>', rps_text)
    rps_image = findall('data-original="(.*)"', rps_text)
    rps_paragraph = findall('<div class="excerpt">\s*\n*\s*\n*<p>\n*(.*)</p>\n*.*\n*\s*</div>', rps_text)
    #Find and display the titles, dates and source of stories selected from
    #PC Gamer
    pcgamer_headline = findall('<h3 class="article-name">(.*)</h3>', pcgamer_text)
    pcgamer_date = findall('data-published-date="(.*)"></time>', pcgamer_text)
    pcgamer_author = findall('<span style="white-space:nowrap">\n(.*)</span>', pcgamer_text)
    pcgamer_paragraph = findall('<p class="synopsis">.*\n*(.*)\n*</p>', pcgamer_text)
    pcgamer_image = findall('<figure class="article-lead-image-wrap" data-original="(.*)">', pcgamer_text)
    #refresh current text displayed in the summary and response widgets
    response.delete(1.0, END)
    text.delete(1.0, END)
    # ---- GUI summary: one "headline - source - date" line per story. ----
    #create for loop to insert the selected amount of reports from crunchyroll
    #into text box in GUI
    # next_value starts at 1 because index 0 of the RSS <title> list is the
    # feed's own title, not a story headline.
    next_value = 1 #value starts at 1 because 1st value is the not a headline
    try:
        for headlines in range(amount_crunchyroll_selected):
            text.insert('insert', '"'+ crunchyroll_headline[next_value] +'"' + ' - '
                        + crunchyroll_headline[0] + ' - ' + crunchyroll_date[next_value] + '\n' + '\n')
            next_value = next_value + 1
    except IndexError: #raised when the amount selected from the combobox exceeds
        #the amount of stories available in crunchyroll
        print('There are not ' + str(amount_crunchyroll_selected) + ' stories available on Crunchyroll, \
there are only ' + str(len(crunchyroll_paragraph)) + ' stories available')
    #create for loop to insert the selected amount of reports from kotaku
    #into text box in GUI
    try:
        for headlines in range(amount_kotaku_selected):
            text.insert('insert', '"'+ kotaku_headline[next_value] +'"' + ' - '
                        + kotaku_headline[0] + ' - ' + kotaku_date[next_value] + '\n' + '\n')
            next_value = next_value + 1
    except IndexError:
        print('There are not ' + str(amount_kotaku_selected) + ' stories available on Kotaku, there are only ' \
              + str(len(kotaku_paragraph)) + ' stories available')
    #create for loop to insert the selected amount of reports from RPS
    #into the GUI (live feeds are 0-based: no feed-title entry to skip)
    list_value = 0
    try:
        for headines in range(amount_rps_selected):
            text.insert('insert', '"'+ rps_headline[list_value] +'"' + ' - ' +
                        'Rock Paper Shotgun' + ' - ' + rps_date[list_value] + '\n' + '\n')
            list_value = list_value + 1
    except IndexError: #raised when the amount selected from the combobox exceeds the amount of stories available in RPS
        print('There are not ' + str(amount_rps_selected) + ' stories available on Rock Paper Shotgun, there are only: ' + str(len(rps_headline)) + ' stories available')
    #Create for loop to insert the selected amount of reports from PC Gamer into GUI
    # NOTE(review): list_value deliberately carries over from the RPS loop here,
    # so PC Gamer entries start at the index where RPS stopped - verify intended.
    try:
        for headlines in range(amount_pcgamer_selected):
            text.insert('insert', '"' + pcgamer_headline[list_value] + '"' + ' - '
                        + 'PC Gamer' + ' - ' + pcgamer_date[list_value] + '\n' + '\n')
            list_value = list_value + 1
    except IndexError: #raised when the amount selected from the combobox exceeds the amount of stories available in PC Gamer
        print('There are not ' + str(amount_pcgamer_selected) + ' stories available on PC Gamer, there are only ' + str(len(pcgamer_headline)) + ' stories available')
    # Report success (or nothing selected) in the response widget.
    if amount_selected > 0:
        response.insert('insert', ' Files Sucessfully Exported')
    else:
        response.insert('insert', 'No files selected')
    # Name of the exported news file. To simplify marking, your program
    # should produce its results using this file name.
    #Create exported file (overwritten on every export)
    news_file_name = 'news.html'
    print('Creating HTML file: ', news_file_name)
    html_file = open(news_file_name, 'w', encoding = 'UTF-8')
    #write the HTML document head, embedded CSS and navigation bar
    html_file.write('''<!DOCTYPE html>
<html>
<head>
<style>
body {
  font-family: "Tw Cen MT Condensed", Arial, sans-serif;
}
#container{
  width: 70%;
  margin: auto;
  background-color:#F5F5F5;
}
h1 {
  font-size: 48px;
  text-align: center;
  color: white;
}
h2 {
  font-size: 38px;
  text-align: center;
  text-decoration: underline;
  margin-bottom: -10px;
}
h3{
  font-size: 32px;
  text-align: center;
  margin-top: 50px;
}
header {
  background-color:#111111;
  margin-bottom: -32px;
}
h4{
  font-size: 24px;
  font-weight: normal;
  text-align: center;
  margin-top: -5px;
}
p{
  font-size: 28px;
  width: 65%;
  margin-left: auto;
  margin-right: auto;
  display: block;
  text-align: center;
  line-height: 1.575;
}
.crunchyroll {
  height: 250px;
  width: 250px;
}
.kotaku_img{
  height: 350px;
  width: 575px;
}
img {
  display: block;
  margin-left: auto;
  margin-right: auto;
}
nav {
  padding-bottom:5px;
  padding-top:5px;
  font-size:28px;
  text-weight: bold;
  color: white;
  background-color:#B22222;
  position: sticky;
  position: -webkit-sticky;
  overflow: hidden;
  top:0;
}
nav a {
  color: white;
  padding-right:7.5%;
  padding-left:7.5%;
}
hr {
  border: 1px solid #B22222;
  border-radius: 3px;
}
p a {
  color: black;
}
p a:active {
  color: #B22222;
}
hr {
  width: 85%;
}
</style>
<title>Gaming and Anime News</title>
</head>
<body>
<div id = "container">
<header>
<h1>GAMING AND ANIME NEWS</h1>
</header>
<nav>
<a href="#crunchyroll">CRUNCHYROLL</a> |
<a href="#kotaku">KOTAKU</a> |
<a href="#rps">ROCK PAPER SHOTGUN</a> |
<a href="#pcgamer">PC GAMER</a>
</nav>
''')
    #export crunchyroll news
    #convert html or remove it when exported
    # NOTE(review): these replace() calls look like HTML-entity unescaping
    # (&lt;/&gt;/&amp;nbsp;) that may have been mangled at some point -
    # verify the first arguments against the intended entities.
    p_value = 0
    for paragraph in range (len(crunchyroll_paragraph)):
        crunchyroll_paragraph[p_value] = crunchyroll_paragraph[p_value].replace(r'<','<')
        crunchyroll_paragraph[p_value] = crunchyroll_paragraph[p_value].replace(r'>','>')
        crunchyroll_paragraph[p_value] = crunchyroll_paragraph[p_value].replace(r'&nbsp;',' ')
        p_value = p_value + 1
    # Reset to 1: skip the feed-title entry again when writing the HTML body.
    next_value = 1 #value starts at 1 because 1st value is the not a headline
    if amount_crunchyroll_selected > 0:
        html_file.write('<h2 id = "crunchyroll">News From Crunchyroll</h2>')
        for crunchyroll_content in range(amount_crunchyroll_selected):
            try:
                html_file.write('<h3>'+ crunchyroll_headline[next_value] +'</h3>')
            except IndexError: #exception if the findall pattern has returned no values (due to changes in the site)
                if len(crunchyroll_headline) == 0:
                    print('Could not find any headlines for Crunchyroll')
            try:
                html_file.write('<h4>'+ crunchyroll_date[next_value] +'</h4>')
            except IndexError:
                if len(crunchyroll_date) == 0:
                    print('Could not find any dates for Crunchyroll')
            try:
                html_file.write('<img class = "crunchyroll" src="' + crunchyroll_image[next_value - 1] + '">')
            except IndexError:
                if len(crunchyroll_image) == 0:
                    print('Could not find any images for Crunchyroll')
            try:
                html_file.write('<p>'+ crunchyroll_byline[next_value - 1] + '. ' + crunchyroll_paragraph[next_value - 1] +'</p>')
            except IndexError:
                if len(crunchyroll_byline) == 0:
                    print('Could not find any bylines for Crunchyroll')
                elif len(crunchyroll_paragraph) == 0:
                    print('Could not find any text for Crunchyroll')
            html_file.write('<hr>')
            next_value = next_value + 1
    #export kotaku news
    if amount_kotaku_selected > 0:
        html_file.write('<h2 id = "kotaku">News From Kotaku</h2>')
        try:
            for kotaku_content in range(amount_kotaku_selected):
                try:
                    html_file.write('<h3>'+ kotaku_headline[next_value] +'</h3>')
                except IndexError:
                    if len(kotaku_headline) == 0:
                        print('Could not find any headlines for Kotaku')
                try:
                    html_file.write('<h4>'+ kotaku_author[next_value - 1] + ' - ' + kotaku_date[next_value] +'</h4>')
                except IndexError:
                    # NOTE(review): this compares the list itself to 0 (always
                    # False); len(kotaku_author) == 0 was probably intended.
                    if kotaku_author == 0:
                        print('Could not find any authors for Kotaku')
                    if len(kotaku_date) == 0:
                        print('Could not find any dates for Kotaku')
                try:
                    html_file.write('<img class = "kotaku_img" src="' + kotaku_image[next_value - 1] + '">')
                except IndexError:
                    if len(kotaku_image) == 0:
                        print('Could not find any images for Kotaku')
                try:
                    html_file.write('<p>'+ kotaku_paragraph[next_value - 1] +'</p>')
                except IndexError:
                    if len(kotaku_paragraph) == 0:
                        print('Could not find any text for Kotaku')
                html_file.write('<hr>')
                next_value = next_value + 1
        except IndexError:
            if len(kotaku_headline) > 0:
                print('Exported: ' + str(len(kotaku_paragraph)) + ' files from Kotaku instead of ' + str(amount_kotaku_selected))
    #Export rock paper shotgun news
    list_value = 0
    if amount_rps_selected > 0:
        html_file.write('<h2 id = "rps">News From Rock Paper Shotgun</h2>')
        for rps_content in range(amount_rps_selected):
            try:
                # The + 3 offset skips non-story <p class="title"> matches at
                # the top of the RPS page.
                html_file.write('<h3>'+ rps_headline[list_value + 3] +'</h3>')
            except IndexError:
                if len(rps_headline) == 0:
                    print('Could not find any headlines for Rock Paper Shotgun')
            try:
                html_file.write('<h4>'+ 'By:' + rps_author[list_value] + ' - ' + rps_date[list_value] +'</h4>')
            except IndexError:
                if len(rps_author) == 0:
                    print('Could not find any authors for Rock Paper Shotgun')
                elif len(rps_date) == 0:
                    print('Could not find any dates for Rock Paper Shotgun')
            try:
                html_file.write('<img class = "kotaku_img" src="' + rps_image[list_value] + '">')
            except IndexError:
                if len(rps_image) == 0:
                    print('Could not find any images for Rock Paper Shotgun')
            try:
                html_file.write('<p>' + rps_paragraph[list_value] + '</p>')
            except IndexError:
                if len(rps_paragraph) == 0:
                    print('Could not find any paragraphs for Rock Paper Shotgun')
            html_file.write('<hr>')
            list_value = list_value + 1
    #Export PC Gamer News (list_value again carries over from the RPS loop)
    if amount_pcgamer_selected > 0:
        html_file.write('<h2 id = "pcgamer">News From PC Gamer</h2>')
        for pcgamer_content in range(amount_pcgamer_selected):
            try:
                html_file.write('<h3>' + pcgamer_headline[list_value] +'</h3>')
            except IndexError:
                # NOTE(review): compares the list itself to 0 (always False);
                # len(pcgamer_headline) == 0 was probably intended.
                if pcgamer_headline == 0:
                    print('Could not find any headlines from PC Gamer')
            try:
                html_file.write('<h4>'+ 'By:' + pcgamer_author[list_value] + ' - ' + pcgamer_date[list_value]+ '</h4>')
            except IndexError:
                if len(pcgamer_date) == 0:
                    print('Could not find any dates for PC Gamer')
                elif len(pcgamer_author) == 0:
                    print('Could not find any authors')
            try:
                html_file.write('<img class = "kotaku_img" src="' + pcgamer_image[list_value] + '">')
            except IndexError:
                if len(pcgamer_image) == 0:
                    print('Could not find any images for PC Gamer')
            try:
                html_file.write('<p>' + pcgamer_paragraph[list_value] + '</p>')
            except IndexError:
                if len(pcgamer_paragraph) == 0:
                    print('Could not find any text for PC Gamer')
            html_file.write('<hr>')
            list_value = list_value + 1
    #Write List of Sources
    html_file.write('<h3>Sources</h3>')
    html_file.write('<p>Crunchyroll - <a href="http://feeds.feedburner.com/crunchyroll/animenews" target="_blank">http://feeds.feedburner.com/crunchyroll/animenews</a></p>')
    html_file.write('<p>Kotaku - <a href="https://www.kotaku.com.au/" target="_blank">https://www.kotaku.com.au/</a></p>')
    html_file.write('<p>Rock Paper Shotgun - <a href="https://www.rockpapershotgun.com/" target="_blank">https://www.rockpapershotgun.com/</a></p>')
    html_file.write('<p>PC Gamer - <a href="https://www.pcgamer.com/au/news/" target="_blank">https://www.pcgamer.com/au/news/</a></p>')
    #Write the end of the opening tags
    html_file.write('</div>')
    html_file.write('</body>')
    html_file.write('</html>')
    #Close the HTML file
    html_file.close()
def save(): #create function for save button to save selections to database
    """Handler for the 'Save Selected' button.

    Re-scrapes the four feeds with the same patterns as export(), then
    replaces the contents of the selected_stories table in news_log.db with
    one (headline, news_feed, publication_date) row per selected story.
    """
    #get value selected by user for the news feeds converting them from string to int data format
    amount_crunchyroll_selected = int(crunchyroll_select.get())
    amount_kotaku_selected = int(kotaku_select.get())
    amount_rps_selected = int(rps_select.get())
    amount_pcgamer_selected = int(pcgamer_select.get())
    # NOTE(review): amount_selected is computed but never used in this function.
    amount_selected = amount_crunchyroll_selected + amount_kotaku_selected \
                      + amount_rps_selected + amount_pcgamer_selected
    # ---- Same regex scraping as export(); see the notes there. ----
    #Find and display the titles, dates and source of stories selected
    #from Crunchyroll
    crunchyroll_headline = findall('<title>(.*)</title>', crunchyroll_text)
    crunchyroll_date = findall('<pubDate>(.*)</pubDate>', crunchyroll_text)
    #find byline and text
    crunchyroll_byline = findall('<description>(.*?)<br/><', crunchyroll_text)
    crunchyroll_paragraph = findall('/><br/><br/><p>(.*?)</description>', crunchyroll_text)
    crunchyroll_image = findall('<br/><img src="(.*)" /><br/><br/><p>', crunchyroll_text)
    #Find and display the titles, dates and source of stories selected
    #from Kotaku
    kotaku_headline = findall('<title>(.*)</title>', kotaku_text)
    kotaku_date = findall('<pubDate>(.*)</pubDate>', kotaku_text)
    kotaku_image = findall('<img src="(.*)" />', kotaku_text)
    kotaku_paragraph = findall('<description>\n*.* /><p>(.*?)</p>', kotaku_text)
    kotaku_author = findall('<dc:creator>\n*-<!\[CDATA\[(.*)\]\]>', kotaku_text)
    #Find and display the titles, dates and source of stories selected from
    #rockpaper shotgun
    rps_headline = findall('<p class="title">\n*\s*<a href=".*">(.*)</a>\n*\s*</p>', rps_text)
    rps_date = findall('<span>•</span>\n*\s*(.*?)<', rps_text)
    rps_author = findall('rel="author">(.*)</a>', rps_text)
    rps_image = findall('data-original="(.*)"', rps_text)
    rps_paragraph = findall('<div class="excerpt">\s*\n*\s*\n*<p>\n*(.*)</p>\n*.*\n*\s*</div>', rps_text)
    #Find and display the titles, dates and source of stories selected from
    #PC Gamer
    pcgamer_headline = findall('<h3 class="article-name">(.*)</h3>', pcgamer_text)
    pcgamer_date = findall('data-published-date="(.*)"></time>', pcgamer_text)
    pcgamer_author = findall('<span style="white-space:nowrap">\n(.*)</span>', pcgamer_text)
    pcgamer_paragraph = findall('<p class="synopsis">.*\n*(.*)\n*</p>', pcgamer_text)
    pcgamer_image = findall('<figure class="article-lead-image-wrap" data-original="(.*)">', pcgamer_text)
    try:
        #make a connection to the database and get a cursor on it
        connection = connect(database = "news_log.db")
        saved_selections = connection.cursor()
        #delete previous selections saved (the table holds only the latest set)
        delete = "DELETE FROM selected_stories"
        saved_selections.execute(delete)
        #insert new selections; parameterised queries avoid SQL injection
        save_value = 0
        try:
            for selections in range(amount_crunchyroll_selected):
                # Incremented first: index 0 of the RSS <title>/<pubDate> lists
                # is the feed's own metadata, not a story.
                save_value = save_value + 1
                saved_selections.execute("INSERT INTO selected_stories(headline,news_feed,publication_date) VALUES(?, ?, ?)",\
                                         (crunchyroll_headline[save_value],'Crunchyroll',crunchyroll_date[save_value]))
        except IndexError: #exception when more stories are selected than available
            print('You selected more stories from Crunchyroll than available')
        try:
            for selections in range(amount_kotaku_selected):
                save_value = save_value + 1
                saved_selections.execute("INSERT INTO selected_stories(headline,news_feed,publication_date) VALUES(?, ?, ?)",\
                                         (kotaku_headline[save_value],'Kotaku',kotaku_date[save_value]))
        except IndexError:
            print('You selected more stories from Kotaku than available')
        try:
            for selections in range(amount_pcgamer_selected):
                saved_selections.execute("INSERT INTO selected_stories(headline,news_feed,publication_date) VALUES(?, ?, ?)",\
                                         (pcgamer_headline[save_value],'PC Gamer',pcgamer_date[save_value]))
                save_value = save_value + 1
        except IndexError:
            # NOTE(review): copy-pasted message - this branch is for PC Gamer,
            # not Kotaku.
            print('You selected more stories from Kotaku than available')
        try:
            for selections in range(amount_rps_selected):
                saved_selections.execute("INSERT INTO selected_stories(headline,news_feed,publication_date) VALUES(?, ?, ?)",\
                                         (rps_headline[save_value],'Rock Paper Shotgun',rps_date[save_value]))
                save_value = save_value + 1
        except IndexError:
            # NOTE(review): copy-pasted message - this branch is for Rock Paper
            # Shotgun, not Kotaku.
            print('You selected more stories from Kotaku than available')
    except OperationalError: #exception if connection could not be made with database or tables not present
        print('Could not connect to the database')
    #commit changes and close connections
    # NOTE(review): if connect() itself raised above, ``connection`` is unbound
    # here and these lines raise NameError; consider moving them inside the try.
    connection.commit()
    saved_selections.close()
    connection.close()
# ---- Buttons, response area and grid/pack layout of all widgets ----------
#Create button frames
button_frame = Frame(window)
#Create export selected button (wired to export() above)
export_button = Button(button_frame, text = 'Export Selected', font = heading_font,
                       borderwidth = 3, bg = 'white', command = export)
#Create save selections button (wired to save() above)
save_button = Button(button_frame, text = 'Save Selected', font = heading_font,
                     borderwidth = 3, bg = 'white', command = save)
#Create area for button response to appear (written to by export())
response = Text(window, font = text_font, width = 30, height = 1)
#Geometry manager to put widgets into main window
news_mixer_text.grid(pady = (10, 0), row = 1, column = 1
                     ,sticky=E+W+S+N)
anime_image_label.grid(pady = (10,0), row = 5, column = 2)
live_news_feed.grid(padx = 15, pady = 15, row = 2, column = 1)
past_news_feed.grid(padx = 15, pady = 15, row = 2, column = 2)
stories_selected_title.grid(row = 3, column = 1, pady = 5)
output_section.grid(row = 4, column = 1, columnspan = 2)
button_frame.grid(pady = 15, row = 5, column = 1)
export_button.pack(side = "left", padx = 10)
save_button.pack(side = "left", padx = 10)
response.grid(padx = 170, pady= (0, 20), row = 6, ipady = 5, ipadx= 5, column = 1)
#Geometry manager to put widgets into live news feed column
live_news_heading.grid(padx = 125, pady = 5, row = 1, column = 1, columnspan = 2)
rps_select.grid(pady = 5, padx= 5, row = 2, column = 2)
rps_feed.grid(pady = 5, padx= 5, row = 2, column = 1)
pcgamer_select.grid(pady = 5, padx= 5, row = 3, column = 2)
pcgamer_feed.grid(pady = (10, 20), row = 3, column = 1)
#Geometry manager to put widgets into past news feed column
past_news_heading.grid(padx = 125, pady = 5, row = 1, column = 1, columnspan = 2)
crunchyroll_select.grid(pady = 5, padx= 5, row = 2, column = 2)
crunchyroll_feed.grid(pady = 5, padx= 5, row = 2, column = 1)
kotaku_select.grid(pady = 5, row = 3, column = 2)
kotaku_feed.grid(pady = (10, 20), row = 3, column = 1)
#Geometry manager to put widgets into stories selected output section
text.grid(padx=20,pady=10)
# NOTE(review): there is no window.mainloop() call; the GUI only stays alive
# when run from an environment that provides its own event loop (e.g. IDLE).
pass
| true |
005592bca3d79b92250cc19d2c6a704fc86c0c72 | Python | gaborvecsei/Neural-Network-Dreams | /utils.py | UTF-8 | 3,952 | 2.703125 | 3 | [] | no_license | import os
import subprocess
import tempfile
from pathlib import Path
from typing import Tuple, Callable
import cv2
import matplotlib.pyplot as plt
import numpy as np
import youtube_dl
def create_rnn_data(data: np.ndarray, time_steps: int) -> Tuple[np.ndarray, np.ndarray]:
    """Slice *data* into overlapping windows for RNN training.

    Each sample pairs ``time_steps`` consecutive rows of *data* (input)
    with the row immediately following the window (target).

    :param data: 2-D array of shape (n_samples, n_features).
    :param time_steps: window length; must be smaller than ``len(data)``.
    :return: tuple ``(x, y)`` with shapes (n, time_steps, n_features) and
        (n, n_features), where ``n = len(data) - time_steps``.
    :raises ValueError: if ``time_steps`` is not smaller than ``len(data)``.
    """

    if time_steps >= len(data):
        raise ValueError("Length of data is lower then the time_steps value")

    n_windows = len(data) - time_steps
    x_windows = np.zeros((n_windows, time_steps, data.shape[1]), dtype=data.dtype)
    y_targets = np.zeros((n_windows, data.shape[1]), dtype=data.dtype)

    for start in range(n_windows):
        stop = start + time_steps
        x_windows[start] = data[start:stop]
        y_targets[start] = data[stop]

    return x_windows, y_targets
def show_image_grid(images, n_images=10, n_rows=3, figsize=(10, 10), randomize=False) -> None:
    """Display ``n_images`` entries of *images* on a matplotlib grid.

    :param images: indexable collection of images.
    :param n_images: number of grid cells to fill.
    :param n_rows: number of grid rows; columns are derived from it.
    :param figsize: matplotlib figure size.
    :param randomize: if True, pick each displayed image at random
        instead of taking the first ``n_images`` in order.
    """
    n_cols = int(np.ceil(n_images / n_rows))
    fig = plt.figure(figsize=figsize)
    for slot in range(n_images):
        # Either a random index or the slot position itself.
        idx = np.random.randint(0, len(images)) if randomize else slot
        axis = fig.add_subplot(n_rows, n_cols, slot + 1)
        axis.imshow(images[idx])
        # Hide the tick marks: only the pictures matter here.
        axis.set_yticks([])
        axis.set_xticks([])
def frame_preprocessor(frame: np.ndarray) -> np.ndarray:
    """Resize a video frame to 64x64 and scale pixel values into [0, 1]."""
    resized = cv2.resize(frame, (64, 64))
    # NOTE: the BGR<->RGB conversion was intentionally left disabled in the
    # original implementation.
    return resized.astype(np.float32) / 255.0
def decoded_frame_postprocessor(frame: np.ndarray) -> np.ndarray:
    """Map a decoded float frame in [0, 1] back to uint8 pixel values."""
    scaled = frame * 255
    return scaled.astype(np.uint8)
def get_frames_from_youtube_video(video_url: str,
                                  frame_preprocessor: Callable[[np.ndarray], np.ndarray] = None) -> np.ndarray:
    """Download a YouTube video and return its frames as a single array.

    :param video_url: URL of the video to download.
    :param frame_preprocessor: optional callable applied to every extracted
        frame (NOTE: shadows the module-level function of the same name).
    :return: np.ndarray of frames extracted at 24 fps.
    """
    # Downloading the video; the low-resolution format keeps downloads small.
    output_video_file_path = tempfile.NamedTemporaryFile().name
    youtube_downloader_params = {"quiet": False, "outtmpl": output_video_file_path, "format": "best[height<=240]"}
    with youtube_dl.YoutubeDL(params=youtube_downloader_params) as ydl:
        ydl.download([video_url])
    output_video_file_path = Path(output_video_file_path)
    # Frame extraction: glob for the downloaded file, presumably because
    # youtube-dl appends an extension to the output template -- confirm.
    input_video_file_path = str(list(output_video_file_path.parent.glob(output_video_file_path.stem + "*"))[0])
    with tempfile.TemporaryDirectory() as tmp_folder:
        # ffmpeg writes numbered JPEG frames (000001.jpg, ...) at 24 fps.
        output_filename = os.path.join(tmp_folder, '%06d.jpg')
        video_to_frames_command = ['ffmpeg', '-loglevel', 'debug', '-i', input_video_file_path, '-vf', 'fps=24',
                                   output_filename]
        subprocess.check_call(video_to_frames_command)
        os.remove(input_video_file_path)
        # Sort so frames come back in temporal order.
        frame_paths = np.sort(list(Path(tmp_folder).glob("*.jpg")))
        frames = [cv2.cvtColor(cv2.imread(str(p)), cv2.COLOR_RGB2BGR) for p in frame_paths]
        if frame_preprocessor is not None:
            frames = [frame_preprocessor(f) for f in frames]
        return np.array(frames)
def convert_video_to_gif(input_video_path, output_gif_path, fps=24):
    """Convert a video into an optimized GIF via ffmpeg's two-pass palette trick.

    Pass 1 generates a 256-color palette from the video; pass 2 renders the
    GIF using that palette.

    :param input_video_path: path of the source video file.
    :param output_gif_path: path the resulting GIF is written to.
    :param fps: frame rate of the generated GIF.
    :raises subprocess.CalledProcessError: if either ffmpeg invocation fails.
    """

    palette_image_path = "palette.png"
    # NOTE(review): '-t 0' limits processing to zero seconds of input, which
    # looks unintended -- confirm whether it should be dropped.
    command_palette = 'ffmpeg -y -t 0 -i {0} -vf fps={1},scale=320:-1:flags=lanczos,palettegen {2}'.format(
        input_video_path, fps, palette_image_path)
    command_convert = 'ffmpeg -y -t 0 -i {0} -i {1} -filter_complex "fps={2},scale=320:-1:flags=lanczos[x];[x][1:v]paletteuse" {3}'.format(
        input_video_path, palette_image_path, fps, output_gif_path)
    try:
        # The commands embed a quoted filtergraph, so they must run through
        # the shell; passing the bare string without shell=True raises
        # FileNotFoundError on POSIX instead of executing ffmpeg.
        subprocess.check_call(command_palette, shell=True)
        subprocess.check_call(command_convert, shell=True)
    except subprocess.CalledProcessError as exc:
        print(exc.output)
        raise
    finally:
        # The palette may not exist if the first pass failed early; guard the
        # cleanup so the original error is not masked by FileNotFoundError.
        if os.path.exists(palette_image_path):
            os.remove(palette_image_path)
12db42b8070bc948b07083208ea1e2a250ca7cb1 | Python | eliaspk/Pygame-Genetic-Algorithm | /population.py | UTF-8 | 1,902 | 3.78125 | 4 | [] | no_license | import random
from rocket import Rocket
class Population:
    """
    Population of rockets evolved with a genetic algorithm.

    Attributes
    ----------
    rockets : list
        List of all rockets in game
    mating_pool : list
        Fitness-proportional distribution of rockets used during selection
    """
    def __init__(self, pop_size):
        """
        Parameters
        ----------
        pop_size : int
            Number of rockets in our population
        """
        self.rockets = [Rocket() for _ in range(pop_size)]
        self.mating_pool = []
    def evalutate(self, target):
        """ Fills our mating pool with a distribution of rockets that depends on
        their fitness. The higher the rocket's fitness, the higher its count is
        in the mating pool.

        Parameters
        ----------
        target : tuple
            The x,y coordinates of the rocket's target
        """
        maxfit = 0
        # Calculate each rocket fitness and track the maximum fit
        for rocket in self.rockets:
            rocket.calc_fitness(target)
            if rocket.fitness > maxfit:
                maxfit = rocket.fitness
        # Normalize fitness of rockets between 0 - 1. Guard against a
        # population where every rocket scored zero, which would otherwise
        # raise ZeroDivisionError.
        if maxfit > 0:
            for rocket in self.rockets:
                rocket.fitness /= maxfit
        self.mating_pool = []
        # Add each rocket to the mating pool ~100 * fitness times
        for rocket in self.rockets:
            copies = round(rocket.fitness * 100)
            self.mating_pool.extend([rocket] * copies)
    def selection(self):
        """ Randomly select two parents from the mating pool to merge together
        and create a child rocket. Apply mutation to the child to increase
        exploration.
        """
        newpopulation = []
        for _ in self.rockets:
            parentA = random.choice(self.mating_pool).dna
            parentB = random.choice(self.mating_pool).dna
            child = parentA.crossover(parentB)
            child.mutation()
            newpopulation.append(Rocket(child))
        self.rockets = newpopulation
2a4f61db8a9fc77e22355f197a551a7c2189a88f | Python | Shoter99/Projects | /PythonProjects/Ciphers/vigenera.py | UTF-8 | 423 | 3.046875 | 3 | [] | no_license | import sys
# Build per-letter Caesar rows (a tabula recta subset) for a Vigenere cipher.
# The key is taken from the command line; one 26-letter row is printed for
# every distinct lowercase letter of the key (plus 'a' as the identity row).
keyword = sys.argv[1:]
# Guard: no key given. The original compared the argv *list* against "" which
# could never be true, so the guard silently never fired.
if not keyword:
    sys.exit()
keyword = "".join(keyword).lower()
print("")
# Sort and deduplicate the key letters while preserving sorted order.
keyword = sorted(keyword + "a")
keyword = list(dict.fromkeys(keyword))
for letter in keyword:
    letter = ord(letter)
    # Skip anything that is not a lowercase ASCII letter. The original test
    # `96 > letter > 122` is an impossible condition, so non-letters were
    # never filtered out.
    if letter < 97 or letter > 122:
        continue
    for _ in range(26):
        if letter <= 122:
            print(chr(letter), end=" ")
        else:
            # Wrap around past 'z' back to 'a'.
            print(chr(letter - 26), end=" ")
        letter += 1
    print("")
10cdf06df496bb40508df0ac58857ec0f0a04bdb | Python | FelSiq/machine-learning-learning | /deep-learning-algorithm-implementation/from-scratch/deprecated/dl-concepts/regularization.py | UTF-8 | 1,436 | 3.53125 | 4 | [] | no_license | """Implement different types of regularizations."""
import numpy as np
def l2(W: np.ndarray, lambda_: float = 0.01, exclude_bias: bool = False) -> float:
    """Ridge (L2) regularization.

    Defined as ``lambda_`` times the sum of element-wise squared weights.
    Encourages models whose predictive power is spread over many
    evenly-distributed parameters.

    Arguments
    ---------
    W : :obj:`np.ndarray`
        Array of weights.
    lambda_ : :obj:`float`, optional
        Regularization power. If 0, no regularization is applied; larger
        values apply more regularization.
    exclude_bias : :obj:`bool`, optional
        If True, the last column is excluded from the calculation (it is
        assumed to be the bias column -- a column full of 1s, the so-called
        `bias trick`).

    Returns
    -------
    float
        Ridge (L2) regularization value (0 when ``lambda_`` is 0).
    """
    if np.equal(0, lambda_):
        return 0

    weights = W[:, :-1] if exclude_bias else W
    return lambda_ * np.sum(np.square(weights))
def l2_grad(
    W: np.ndarray, lambda_: float = 0.01, exclude_bias: bool = False
) -> np.ndarray:
    """Gradient of the Ridge (L2) regularization with respect to ``W``.

    When ``exclude_bias`` is True the last column (assumed to be the bias
    column) receives a zero gradient.
    """
    grad = 2.0 * lambda_ * W
    if exclude_bias:
        grad = np.hstack((grad[:, :-1], np.zeros((grad.shape[0], 1))))
    return grad
| true |
984582fbf9b50b920c2e13f8555598e4065c6479 | Python | robotics-in-concert/rocon_devices | /rocon_device_tools/rocon_iot_bridge/src/rocon_iot_bridge/connector.py | UTF-8 | 1,511 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_devices/license/LICENSE
#
#################################################################################
from abc import ABCMeta, abstractmethod
class Connector(object):
    """
    Abstract base class that defines the API of an IoT platform connector.

    Concrete subclasses translate between a specific IoT engine and the
    rocon device messages used by the bridge.
    """
    # Python 2 style metaclass declaration; ABCMeta enforces the
    # @abstractmethod contract on subclasses.
    __metaclass__ = ABCMeta
    @abstractmethod
    def init(self):
        """
        Initialises the connector (e.g. opens connections to the target engine).
        """
        pass
    @abstractmethod
    def close(self):
        """
        Shuts the connector down and releases any resources it holds.
        """
        pass
    @abstractmethod
    def call_get_device_list(self):
        """
        Requests the target engine for the list of all available devices.

        :returns: List of devices
        :rtypes: rocon_device_msgs.msg.Devices
        """
        pass
    @abstractmethod
    def convert_post_to_devices_msg(self, post):
        """
        Converts the target engine's posted device events into a ROS message.

        :param post: device events as posted by the target engine
        :returns: ROS formatted device events
        :rtypes: rocon_device_msgs.msg.Devices
        """
        pass
    @abstractmethod
    def request_configuration_update(self, config):
        """
        Requests the target engine to update its configuration.

        :param config: Configuration (e.g. server addr and port for device event post)
        :type config: dict
        :returns: Success or fail
        :rtype: bool
        """
        pass
| true |
b121e25076403c2a3c1520a8ce852fd47a7f603c | Python | extremecoders-re/simuvex | /simuvex/procedures/libc___so___6/strcpy.py | UTF-8 | 646 | 2.515625 | 3 | [
"BSD-2-Clause"
] | permissive | import simuvex
from simuvex.s_type import SimTypeString
class strcpy(simuvex.SimProcedure):
    #pylint:disable=arguments-differ
    def run(self, dst, src):
        """Symbolic summary of libc ``strcpy(dst, src)``.

        Measures the source length via the strlen SimProcedure, then copies
        ``src_len + 1`` bytes with strncpy (presumably to include the
        terminating NUL) and returns strncpy's result expression.
        """
        # Declare argument/return types for the SimProcedure machinery:
        # both parameters and the return value are pointers to C strings.
        self.argument_types = {0: self.ty_ptr(SimTypeString()),
                               1: self.ty_ptr(SimTypeString())}
        self.return_type = self.ty_ptr(SimTypeString())
        strlen = simuvex.SimProcedures['libc.so.6']['strlen']
        strncpy = simuvex.SimProcedures['libc.so.6']['strncpy']
        # Run strlen inline so its symbolic result is available here.
        src_len = self.inline_call(strlen, src)
        ret_expr = self.inline_call(strncpy, dst, src, src_len.ret_expr+1, src_len=src_len).ret_expr
        return ret_expr
| true |
a3e058dad6eb788389eaf9036f1b948c1289e986 | Python | kdogyun/machine_learning | /HW#4.4.py | UTF-8 | 1,623 | 2.9375 | 3 | [] | no_license | import tensorflow as tf
from tensorflow import keras
import numpy as np
# Data augmentation homework (apply at least three augmentation techniques).
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Scale pixel values into [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
# Add the single channel dimension expected by the image data generator.
train_images = train_images.reshape(len(train_images), 28, 28, 1)
test_images = test_images.reshape(len(test_images), 28, 28, 1)
# Simple fully-connected classifier over the flattened image.
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28, 1)))
model.add(keras.layers.Dense(128, activation=tf.nn.relu))
model.add(keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Augmentation: feature-wise normalization, rotation, shifts and flips.
datagen = keras.preprocessing.image.ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
# https://keras.io/preprocessing/image/
datagen.fit(train_images)
# NOTE(review): steps_per_epoch is a float here (shape[0] / 32) -- confirm
# whether integer division (//) was intended.
history = model.fit_generator(datagen.flow(train_images, train_labels, batch_size=32),
                              epochs=10, steps_per_epoch=train_images.shape[0] / 32)
# Based on the official documentation example.
# Epoch 5:  train loss 0.7407 acc 0.7215 / test loss 1.4071 acc 0.4301
# Epoch 10: train loss 0.7054 acc 0.7344 / test loss 1.3949 acc 0.4734
# Author's open question: is steps_per_epoch the number of batches per epoch?
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
| true |
4e1148bac32856a404ce9ad2fdef79cb4632ba5f | Python | oolsson/oo_eclipse | /Practise_stuff/pandas/df/time_series/random_holdings.py | UTF-8 | 383 | 2.703125 | 3 | [] | no_license | import random
import pandas as pd
import numpy as np
import time
import heapq
# NOTE(review): the print statements below use Python 2 syntax; this script
# is not Python 3 compatible as written.
# Draw 11 uniform(0, 1) samples and find the two largest values.
df=pd.DataFrame(np.random.uniform(0,1,11))
x=heapq.nlargest(2, df.values)
df2=pd.DataFrame(index=df.index)
# Five rounds: in each round redraw, then mark with 1 every sample that is
# >= the second-largest value (i.e. select the top-2 "holdings" per round).
for i in range(0,5):
    df=pd.DataFrame(np.random.uniform(0,1,11))
    x=heapq.nlargest(2, df.values)
    df2[i]=(df>=x[1])*1
#    df['e']=(df>=x[1])*1
    print x[0]
print df2
| true |
def trap(l, c, r):
    """Return True when the (left, center, right) tiles of the row above
    produce a trap in the current tile (AoC 2016 day 18 rules)."""
    # The four trap-producing patterns from the puzzle description.
    trap_patterns = {
        ("^", "^", "."),
        (".", "^", "^"),
        ("^", ".", "."),
        (".", ".", "^"),
    }
    return (l, c, r) in trap_patterns
class Maze(object):
    """Rows of safe ('.') and trap ('^') tiles grown from a starting row."""

    def __init__(self, line, n):
        """Store the first row and the total number of rows to generate."""
        self.line = line
        self.n = n

    def solve(self):
        """Generate ``n`` rows and return the total count of safe tiles."""
        safe_tiles = 0
        for _ in range(self.n):
            safe_tiles += self.line.count(".")
            # Pad with safe tiles so edge cells have both neighbours.
            padded = "." + self.line + "."
            next_row = []
            for j in range(len(padded) - 2):
                left, center, right = padded[j], padded[j + 1], padded[j + 2]
                # Inlined trap rule: the four trap-producing patterns.
                is_trap = (left, center, right) in {
                    ("^", "^", "."), (".", "^", "^"),
                    ("^", ".", "."), (".", ".", "^"),
                }
                next_row.append("^" if is_trap else ".")
            self.line = "".join(next_row)
        return safe_tiles
# Sanity checks using the worked examples from the puzzle statement.
assert Maze("..^^.", 3).solve() == 6
assert Maze(".^^.^.^^^^", 10).solve() == 38
# Puzzle input: the first row of the 40-row maze.
input_line = (
    ".^.^..^......^^^^^...^^^...^...^....^^.^...^."
    + "^^^^....^...^^.^^^...^^^^.^^.^.^^..^.^^^..^^^^^^.^^^..^"
)
print(Maze(input_line, 40).solve())
| true |
714e3498a803734ecd7f2bd1369cd2d32493362a | Python | doomcatLee/pythonScript | /main.py | UTF-8 | 1,867 | 3.421875 | 3 | [] | no_license | import csv
# instantiate empty array
rowArray = [];
# pull out
# NOTE(review): binary mode ('rb'/'wb') with the csv module only works on
# Python 2 -- on Python 3 these opens must use text mode with newline=''.
with open('test.csv', 'rb') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
    for row in spamreader:
        rowArray.append(', '.join(row))
#print(rowArray);
# ABOVE RETURNS ['policyID,statecode,county,eq_site_limit,hu_site_limit,fl_site_limit,fr_site_limit,tiv_2011','pug,goldenRetreiver,samoyed,beagle,,,','pug,goldenRetreiver,,,,]
# Start from 1 and end loop at index 3, because our test.csv has three rows
# We start the loop from 1 not 0 because we don't want to include the first row in our csv file
# NOTE(review): the row count is hard-coded; len(rowArray) would adapt to the file.
for i in range(1,3): #REPLACE 3 WITH YOUR ROW COUNTS
    # instantiate empty string
    lastString = '';
    # row is an individual string extracted from our rowArray. 'pug,goldenRetreiver,samoyed,beagle,,,'
    row = rowArray[i];
    # Turn string row into an array
    itemArray = row.split(','); #['pug', 'goldenRetreiver', 'samoyed', 'beagle', '', '', '']
    # Loop through our new array called itemArray
    for index, val in enumerate(itemArray):
        if (val == ''):
            # if empty string found, assign the item before as a string called lastString
            lastString = itemArray[index-1];
            # end loop so we prevent nested looping error
            break;
    # with our last string now assigned, concatenate with a string 'unclassified_'
    outputString = 'unclassified_' + lastString;
    # Loop through itemArray again
    for index, val in enumerate(itemArray):
        if (val == ''):
            # replace every empty string with outputString
            itemArray[index] = outputString;
    print(itemArray);
# NOTE(review): only the final itemArray survives the loop, so just one row is
# exported; writing inside the loop would export every processed row.
with open('result.csv', 'wb') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',',
                            quotechar=',', quoting=csv.QUOTE_MINIMAL)
    spamwriter.writerow(itemArray)
| true |
class Dictionary:
    """String set driven by textual insert/find commands."""

    def __init__(self):
        # A set gives O(1) average-case membership tests.
        self.elements = set()

    def insert(self, x):
        """Record string *x* in the dictionary."""
        self.elements.add(x)

    def find(self, y):
        """Print 'yes' when *y* was inserted earlier, else 'no'."""
        print('yes' if y in self.elements else 'no')
# Driver: read N commands from stdin. 'insert <word>' adds a word,
# 'find <word>' prints yes/no; the slices skip the command keyword prefix.
dic = Dictionary()
N = int(input())
for i in range(N):
    command = input()
    if command[0] == 'i':
        dic.insert(command[7:])
    else:
        dic.find(command[5:])
######
class LinearMap(object):
    """Associative map backed by a plain list of (key, value) pairs.

    Lookup scans the list in O(n); used as the bucket type for the
    hash-based maps below.
    """

    def __init__(self):
        self.items = []

    def add(self, key, value):
        """Append a (key, value) pair; existing keys are not replaced."""
        self.items.append((key, value))

    def get(self, key):
        """Return the first value stored under *key*.

        Prints 'key error' and implicitly returns None when the key is
        absent (kept for behavioral compatibility).
        """
        for stored_key, stored_value in self.items:
            if stored_key == key:
                return stored_value
        print('key error')

    def __str__(self):
        return str(self.items)
####
class BetterMap(object):
    """Hash map distributing keys across ``n`` LinearMap buckets.

    ``hash(key) % n`` selects the bucket, shrinking each linear scan by a
    factor of ``n`` on average.
    """

    def __init__(self, n=100):
        """Create *n* empty LinearMap buckets."""
        self.maps = [LinearMap() for _ in range(n)]

    def find_map(self, key):
        """Return the bucket responsible for *key*."""
        return self.maps[self.index(key)]

    def index(self, key):
        """Map *key* to a bucket index.

        Fix: the original called the undefined name ``lash`` (a NameError
        at runtime); ``hash`` was clearly intended.
        """
        return hash(key) % len(self.maps)

    def add(self, key, value):
        """Store (key, value) in the appropriate bucket.

        Fix: the original passed a single tuple to LinearMap.add, which
        raised TypeError; key and value are now forwarded separately.
        """
        self.find_map(key).add(key, value)

    def get(self, key):
        """Look up *key* in its bucket."""
        return self.find_map(key).get(key)

    def __str__(self):
        return str(self.maps)
####
class HashMap(object):
    """Growable hash map that doubles its bucket count when full."""

    def __init__(self):
        # Start with two buckets; resize() doubles this as items are added.
        self.maps = BetterMap(2)
        self.num = 0  # number of stored items

    def get(self, key):
        """Look up *key* in the underlying BetterMap."""
        return self.maps.get(key)

    def add(self, key, value):
        """Insert (key, value), growing the table first if it is full."""
        if self.num == len(self.maps.maps):
            self.resize()
        self.maps.add(key, value)
        self.num += 1

    def resize(self):
        """Rehash every item into a table with twice as many buckets.

        Fix: the original read the non-existent attribute ``self.nums``,
        which raised AttributeError on the first resize; it now uses
        ``self.num``.
        """
        new_maps = BetterMap(self.num * 2)
        for m in self.maps.maps:
            for k, v in m.items:
                new_maps.add(k, v)
        self.maps = new_maps
954abe57127c01dd6a85db39326e97ba0e566ef5 | Python | NightKirie/MULTIMEDIA-CONTENT-ANALYSIS | /hw1/src/4_Edge_Detection.py | UTF-8 | 3,908 | 2.6875 | 3 | [] | no_license | import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import math
import collections
from Ground_Truth import *
def Edge_Detection(file_list, t):
    """Edge-change-ratio shot-change detection over an ordered frame sequence.

    For each consecutive frame pair the fraction of edge pixels that appear
    (entering) and disappear (exiting) is computed; a frame index is flagged
    as a shot change when either ratio exceeds the threshold *t*.

    :param file_list: ordered list of frame image paths
    :param t: edge-change-ratio threshold
    :return: tuple (entering_ratio, exiting_ratio, shot_change_list)
    """
    entering_ratio = []
    exiting_ratio = []
    shot_change_list = []
    # Edges of the first frame; the Gaussian blur suppresses noise before Canny.
    img_1 = cv2.GaussianBlur(cv2.cvtColor(cv2.imread(file_list[0]), cv2.COLOR_BGR2GRAY), (5, 5), 0)
    edge_1 = cv2.Canny(img_1, 10, 100)
    # NOTE(review): the range stops at len(file_list)-1, so the final frame is
    # never compared -- confirm whether the last pair should be included.
    for i in range(1, len(file_list)-1):
        img_2 = cv2.GaussianBlur(cv2.cvtColor(cv2.imread(file_list[i]), cv2.COLOR_BGR2GRAY), (5, 5), 0)
        edge_2 = cv2.Canny(img_2, 10, 100)
        # Canny edge maps hold 0 or 255 per pixel; uint8 subtraction wraps, so
        # only pixels that are 255 in the minuend and 0 in the subtrahend
        # remain at exactly 255.
        entering_edge = edge_2 - edge_1
        exiting_edge = edge_1 - edge_2
        x_in = collections.Counter(entering_edge.reshape(-1))[255]  # newly appeared edge pixels
        x_out = collections.Counter(exiting_edge.reshape(-1))[255]  # vanished edge pixels
        z_1 = collections.Counter(edge_1.reshape(-1))[255]  # edge pixels in previous frame
        z_2 = collections.Counter(edge_2.reshape(-1))[255]  # edge pixels in current frame
        if z_1 != 0 and z_2 != 0:
            entering_ratio.append(x_in/z_2)
            exiting_ratio.append(x_out/z_1)
            # Shot change when either change ratio exceeds the threshold.
            if max(x_in/z_2, x_out/z_1) > t:
                shot_change_list.append(i)
        else:
            # A frame with no edge pixels makes the ratios undefined; record 0.
            entering_ratio.append(0)
            exiting_ratio.append(0)
        edge_1 = edge_2
    return (entering_ratio, exiting_ratio, shot_change_list)
if __name__ == "__main__":
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    def _detect_and_plot(name, threshold, ground_truth):
        """Run shot-change detection for one clip and save its text + plots.

        Extracted helper: the original repeated this section verbatim for
        news, soccer and ngc.

        :param name: clip base name; frames are read from ../<name>_out and
            results are written to ../result/4/<name>*.
        :param threshold: edge-change-ratio threshold for Edge_Detection.
        :param ground_truth: list of known shot-change frame indices.
        """
        frame_files = []
        for dir_path, _, file_list in os.walk("../%s_out" % name):
            frame_files = [os.path.join(dir_path, file) for file in file_list]

        print("Working on %s" % name)
        with open("../result/4/%s.txt" % name, "w+") as f:
            (entering_ratio, exiting_ratio, shot_change_list) = Edge_Detection(frame_files, threshold)
            f.write("Edge change ratio for shot-change detection of %s.mpg:\n" % name)
            f.write(f"{shot_change_list}\n\n")

        # Scatter of entering/exiting ratios; ground truth marked on the x-axis.
        plt.plot(entering_ratio, "bx")
        plt.plot(exiting_ratio, "g.")
        plt.plot(ground_truth, [0]*len(ground_truth), "ro")
        plt.savefig("../result/4/%s_1.jpg" % name)
        plt.clf()

        # Curve of max(entering, exiting) ratio per frame.
        plt.plot(np.maximum(entering_ratio, exiting_ratio))
        plt.plot(ground_truth, [0]*len(ground_truth), "ro")
        plt.savefig("../result/4/%s_2.jpg" % name)
        plt.clf()

    # Ground-truth index lists come from Ground_Truth (star import above).
    _detect_and_plot("news", 0.55, NEWS_ANS)
    _detect_and_plot("soccer", 0.68, SOCCER_ANS)
    _detect_and_plot("ngc", 0.8, NGC_ANS)
6fa56e1a69c0f5dcbb93dc139ed05e6ade7c3772 | Python | ericmoritz/gittest | /utils.py | UTF-8 | 172 | 2.75 | 3 | [] | no_license | """This is a common utils file"""
def add(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total
def sub(x, y):
    """Return *x* minus *y*."""
    difference = x - y
    return difference
def multi(x, y):
    """Return the product of *x* and *y*."""
    product = x * y
    return product
def divide(x, y):
    """Return *x* divided by *y*; ZeroDivisionError propagates to the caller."""
    quotient = x / y
    return quotient
| true |
d5b47817498d6cd0a6d50868609566ec9b5d2653 | Python | WoodsChoi/algorithm | /al/al-336.py | UTF-8 | 5,514 | 3.796875 | 4 | [] | no_license | # 回文对
# hard
'''
可拼接成回文串。
示例 1:
输入: ["abcd","dcba","lls","s","sssll"]
输出: [[0,1],[1,0],[3,2],[2,4]]
解释: 可拼接成的回文串为 ["dcbaabcd","abcddcba","slls","llssssll"]
示例 2:
输入: ["bat","tab","cat"]
输出: [[0,1],[1,0]]
解释: 可拼接成的回文串为 ["battab","tabbat"]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/palindrome-pairs
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
----------------------------------------
题解:字典树 + 回文串判断(可以马拉车)
其实没啥太难的,就是复杂麻烦,做个字典树,把每个 word 倒着存进去,然后再遍历一遍 words,找到能够配对的 word 组合
然后对比,组合有三类:
1. 正好和 word 匹配
2. 和 word 的前缀匹配,需要把 word 剩下的后缀再判一遍是否回文
3. word 和字典树里找到的那些 words 的前缀匹配,需要把 words 里的每个剩下的后缀判一遍是否回文,当然字典树是倒叙放进去的,
所以判这些 words 的前缀是否回文即可
这里的 2,3 部分我用了个哈希map存储子串是否回文的结果,避免之后的数据中会有重复计算回文的情况
官方题解还有一办法是,枚举所有 word,找出它的所有前缀回文,和后缀回文,判断把回文串截掉后剩下的字符串,如果在 words 里,
那么就可以构成回文对,然后可以把所有 words 放进一个哈希表或者字典树里
还有个优化版,就是利用拉马车算法线性找回文,然后剩下的字符串在字典树里找,需要建两个字典树,正向和反向,用来对 word 的前缀和后缀的查找
'''
class Solution:
    def palindromePairs(self, words):
        """Return all index pairs [i, j] such that words[i] + words[j] is a palindrome.

        Strategy (per the file's header notes): every word is inserted
        *reversed* into a trie, then each word is matched against it; the
        three partner categories are handled in the loop below. A memo list
        per word caches palindrome checks to avoid recomputation.
        """
        trie = Trie()
        # NOTE(review): 'dict' shadows the builtin. Maps word -> memo list
        # caching whether certain sub-ranges of the word are palindromic.
        dict = {}
        ans = []
        for i in range(len(words)):
            word = words[i]
            trie.addWord(word, i)
            if len(word) > 0:
                dict[word] = [None] * len(word)
                dict[word][0] = True  # a single character is always a palindrome
                dict[word][-1] = isPalindrome(word, 0, len(word) - 1)
        for i in range(len(words)):
            word = words[i]
            result = trie.search(word, i)
            # Category 1: a stored word that is exactly the reverse of this word.
            if result[0] != None:
                ans.append([i, result[0]])
            # Category 2: reversed words[j] matches a prefix of word; the
            # remaining suffix of word must itself be a palindrome.
            for j in result[1]:
                index = len(words[j])
                if index == 0:
                    if dict[word][-1]:
                        ans.append([i, j])
                else:
                    if isPalindrome(word, len(words[j]), len(word) - 1):
                        ans.append([i, j])
            # Category 3: word matches a prefix of some reversed words[index];
            # the leftover part of words[index] must be a palindrome.
            node = result[2]
            if node:
                remains = trie.getRemainStr(node)
                for remain in remains:
                    Str = remain[0]
                    index = remain[1]
                    flag = dict[words[index]][len(Str) - 1]
                    if flag == None:
                        # Cache the palindrome check for later queries.
                        flag = isPalindrome(words[index], 0, len(Str) - 1)
                        dict[words[index]][len(Str) - 1] = flag
                    if flag:
                        ans.append([i, index])
        return ans
class TrieNode:
    """Node of a character trie: one character, children, optional word index."""

    def __init__(self, val, index=None):
        self.val = val
        self.children = []
        # Index of the word terminating at this node; None if not terminal.
        self.endIndex = index

    def addChild(self, node):
        """Attach *node* as a child of this node."""
        self.children.append(node)

    def findChild(self, val):
        """Return the child carrying character *val*, or None if absent."""
        matching = [child for child in self.children if child.val == val]
        return matching[0] if matching else None

    def setEnd(self, index):
        """Mark this node as the end of the word with the given *index*."""
        self.endIndex = index
class Trie:
    """Trie storing each word reversed; used to find palindrome partners."""
    def __init__(self):
        # Sentinel root; '*' is never compared against a real character.
        self.root = TrieNode('*', None)
    def addWord(self, word, index):
        """Insert *word* reversed, tagging the terminal node with *index*."""
        parent = self.root
        for i in range(len(word) - 1, -1, -1):  # walk characters back to front
            node = parent.findChild(word[i])
            if not node:
                node = TrieNode(word[i])
                parent.addChild(node)
            parent = node
        parent.setEnd(index)
    def search(self, word, index):
        """Walk *word* through the trie of reversed words.

        Returns a 3-tuple:
        - matchedIndex: index of a stored word consumed exactly by *word*
          (None if there is none),
        - finishedIndex: indices of stored words that ended strictly before
          *word* was fully consumed,
        - the trie node reached after consuming *word*, or None if the walk
          fell off the trie.
        Matches against *index* itself are skipped.
        """
        matchedIndex = None
        finishedIndex = []
        parent = self.root
        # The root may itself be an end node when the empty string was inserted.
        if parent.endIndex != None and index != parent.endIndex:
            finishedIndex.append(parent.endIndex)
        for i in range(len(word)):
            node = parent.findChild(word[i])
            if node:
                if node.endIndex != None and index != node.endIndex:
                    if i == len(word) - 1:
                        matchedIndex = node.endIndex
                    else:
                        finishedIndex.append(node.endIndex)
                parent = node
            else:
                parent = None
                break
        return (matchedIndex, finishedIndex, parent)
    def getRemainStr(self, parent):
        """Collect (remaining-string, word-index) pairs for every stored word
        below *parent*; parent's own character is stripped from each string.
        """
        arr = []
        def dfs(node, Str):
            nStr = Str + (node.val or '')
            if node.endIndex != None:
                remain = nStr[1:]  # drop parent's own character
                if remain:
                    arr.append((remain, node.endIndex))
            for child in node.children:
                dfs(child, nStr)
        dfs(parent, '')
        return arr
def isPalindrome(word, l, r):
    """Return True if word[l..r] (both ends inclusive) reads the same backwards."""
    segment = word[l:r + 1]
    return segment == segment[::-1]
# Smoke tests against the examples from the problem statement.
print(Solution().palindromePairs(["abcd","dcba","lls","s","sssll"]))
print(Solution().palindromePairs(["bat","tab","cat"]))
print(Solution().palindromePairs(['a', '']))
80bfd04b9ee9b214487d2821c5974416ec81f907 | Python | tzlaine/flat_map | /perf/linux_gcc_data/std_map.py | UTF-8 | 2,039 | 2.6875 | 3 | [] | no_license | int_timings = [
{'size': 8, 'insert': 0.0082838,'iterate': 0.0010476,'find': 0.0056996,},
{'size': 16, 'insert': 0.016761,'iterate': 0.0009918,'find': 0.0102254,},
{'size': 32, 'insert': 0.035271,'iterate': 0.0017184,'find': 0.0195838,},
{'size': 64, 'insert': 0.0716708,'iterate': 0.0033526,'find': 0.0403268,},
{'size': 128, 'insert': 0.140327,'iterate': 0.0189828,'find': 0.0793258,},
{'size': 256, 'insert': 0.275705,'iterate': 0.0292354,'find': 0.16051,},
{'size': 512, 'insert': 0.619743,'iterate': 0.052814,'find': 0.368542,},
{'size': 1024, 'insert': 1.3081,'iterate': 0.107221,'find': 0.618014,},
{'size': 2048, 'insert': 2.00414,'iterate': 0.342502,'find': 1.35856,},
{'size': 4096, 'insert': 3.80635,'iterate': 0.716739,'find': 2.70511,},
{'size': 8192, 'insert': 8.52222,'iterate': 0.905436,'find': 4.77139,},
{'size': 16384, 'insert': 19.094,'iterate': 2.86479,'find': 11.1934,},
{'size': 32768, 'insert': 31.4365,'iterate': 6.38689,'find': 23.9401,},
]
# Benchmark results for std::map with string keys: per-size timings (seconds)
# of insert / iterate / find runs, consumed by the perf plotting scripts.
string_timings = [
    {'size': 8, 'insert': 0.0051682,'iterate': 0.0006566,'find': 0.0046372,},
    {'size': 16, 'insert': 0.0119272,'iterate': 0.0009918,'find': 0.00968,},
    {'size': 32, 'insert': 0.0225582,'iterate': 0.0015784,'find': 0.0191502,},
    {'size': 64, 'insert': 0.0500508,'iterate': 0.0031848,'find': 0.0390134,},
    {'size': 128, 'insert': 0.103643,'iterate': 0.0109092,'find': 0.0807732,},
    {'size': 256, 'insert': 0.226273,'iterate': 0.0213294,'find': 0.172696,},
    {'size': 512, 'insert': 0.463841,'iterate': 0.0958922,'find': 0.46503,},
    {'size': 1024, 'insert': 1.57678,'iterate': 0.241567,'find': 1.0416,},
    {'size': 2048, 'insert': 2.09114,'iterate': 0.344876,'find': 1.5437,},
    {'size': 4096, 'insert': 8.25071,'iterate': 1.03798,'find': 5.28071,},
    {'size': 8192, 'insert': 17.8301,'iterate': 1.97082,'find': 11.3394,},
    {'size': 16384, 'insert': 23.6381,'iterate': 2.8992,'find': 16.1867,},
    {'size': 32768, 'insert': 95.2939,'iterate': 9.00841,'find': 56.8139,},
    ]
| true |
a3db65c060368d960fc5dd0666b9ea99e52a78f9 | Python | tonidezman/sleep-settings | /joan_sleep/dashboard/tests/test_forms.py | UTF-8 | 1,736 | 2.671875 | 3 | [] | no_license | from django.test import TestCase
from datetime import time
from dashboard.models import SleepSetting
from dashboard.forms import SleepSettingsForm
class SleepSettingsFormTest(TestCase):
    """Validation tests for SleepSettingsForm's from/to time-range handling."""

    def setUp(self):
        """Create the single SleepSetting row every test mutates."""
        setting = SleepSetting()
        setting.monday = True
        setting.save()

    @staticmethod
    def _form_data(setting):
        """Translate a SleepSetting instance into the form's data payload.

        Extracted so both tests build their payload the same way instead of
        duplicating the nine-key dictionary.
        """
        return {
            "monday": setting.monday,
            "tuesday": setting.tuesday,
            "wednesday": setting.wednesday,
            "thursday": setting.thursday,
            "friday": setting.friday,
            "saturday": setting.saturday,
            "sunday": setting.sunday,
            "from_time": setting.from_time,
            "to_time": setting.to_time,
        }

    def test_valid_form_with_correct_from_and_to_time(self):
        """A from_time earlier than to_time must validate."""
        self.assertEqual(SleepSetting.objects.count(), 1)
        setting = SleepSetting.objects.first()
        setting.from_time = time(6, 0)
        setting.to_time = time(7, 0)
        form = SleepSettingsForm(data=self._form_data(setting))
        self.assertTrue(form.is_valid())

    def test_invalid_form_with_for_from_and_to_time(self):
        """A from_time later than to_time must be rejected."""
        self.assertEqual(SleepSetting.objects.count(), 1)
        setting = SleepSetting.objects.first()
        setting.from_time = time(6, 0)
        setting.to_time = time(5, 0)
        form = SleepSettingsForm(data=self._form_data(setting))
        self.assertFalse(form.is_valid())
| true |
91cb53ace91de425fb4bf83859eb8b62c69d6de2 | Python | sullivat/primer-calc | /test_primer_calc.py | UTF-8 | 1,417 | 3.1875 | 3 | [
"MIT"
] | permissive | from primer_calc import *
# Testing Primer class initialization
def test_normal_primer_init_str():
    """A clean lowercase ACGT sequence is stored unchanged."""
    primer = Primer('Test', 'aaacccgggttt')
    assert primer.name == 'Test'
    assert primer.sequence == 'aaacccgggttt'
def test_abnormal_primer_init():
    """Sequence sanitation: junk letters dropped, non-strings rejected, case folded."""
    # Non-ACGT characters are filtered out of the sequence.
    primer = Primer('Test Primer', 'qewropiuqerattdfcgckljaaaafd')
    assert primer.name == 'Test Primer'
    assert primer.sequence == 'attcgcaaaa'
    # A non-string sequence yields the sentinel error string.
    primer = Primer('test numbers', 123456)
    assert primer.name == 'test numbers'
    assert primer.sequence == 'Invalid sequence entered'
    # Mixed case is normalized to lowercase.
    primer = Primer('Test letter case', 'aAacCcgGgtTt')
    assert primer.sequence == 'aaacccgggttt'
def test_primer_calc():
    """calc_all() populates all derived values for a 12-mer test primer.

    Fix: the unused `capsys` fixture parameter was removed, and the direct
    dunder calls were replaced with the builtin len()/str() equivalents.
    """
    primer = Primer('test', 'acgtacgtacgt')
    # run calculations and assign values of results
    primer.calc_all()
    # tests
    assert primer.trip_upper == 'ACG-TAC-GTA-CGT-'
    assert primer.mw == 3643.44
    assert primer.gc == 50.0
    assert primer.std_tm == 36
    assert len(primer) == 12
    assert str(primer) == "test: 5'-ACG-TAC-GTA-CGT-3'"
def test_primer_printout(capsys):
    """print_mw()/print_gc() emit the formatted values on stdout."""
    primer = Primer('test', 'acgtacgtacgt')
    # run calculations and assign values of results
    primer.calc_all()
    # sysout testing: capsys captures stdout produced by each print_* call
    primer.print_mw()
    out, err = capsys.readouterr()
    assert out == "3643.44 daltons (g/M)\n"
    primer.print_gc()
    out, err = capsys.readouterr()
    assert out == "50.0 % GC\n"
| true |
fe3e6cd9d0a8e49fa589c80b4823ce0eeb8b5ea3 | Python | BoxuanMa/vis-for-course | /lda.py | UTF-8 | 1,417 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
chachedWords = stopwords.words('english')  # NOTE(review): unused; kept for compatibility
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import gensim
import csv
import numpy as np

np.set_printoptions(threshold=np.inf)

# Read the 4th column (course description) of every row in the syllabus CSV.
# Fix: the original left the file handle open; the context manager closes it.
doc_set = []
with open("syllabus_en.csv", "r", encoding='utf-8') as f:
    reader = csv.reader(f)
    for item in reader:
        doc_set.append(item[3])

# Tokenize on word characters, drop English stop words, and stem each token.
tokenizer = RegexpTokenizer(r'\w+')
en_stop = stopwords.words('english')
p_stemmer = PorterStemmer()
texts = []
for i in doc_set:
    raw = i.lower()
    tokens = tokenizer.tokenize(raw)
    stopped_tokens = [i for i in tokens if not i in en_stop]
    stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
    texts.append(stemmed_tokens)

# Build the bag-of-words corpus, pruning very frequent terms.
dictionary = corpora.Dictionary(texts)
dictionary.filter_extremes(no_below=1,no_above=0.8)
dictionary.filter_n_most_frequent(5)
corpus = [dictionary.doc2bow(text) for text in texts]

# generate LDA model
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=6, id2word=dictionary, update_every=5, chunksize=10000, passes=100)
print(ldamodel.print_topics(num_topics=6,num_words=15))

# Per-document topic distributions, exported as a dense (docs x topics) matrix.
vec = []
for text in texts:
    bow = dictionary.doc2bow(text)
    a = ldamodel.get_document_topics(bow)
    vec.append(a)
matrix = gensim.matutils.corpus2dense(vec, num_terms=6)
np.savetxt("matrix.txt", matrix.T, fmt="%.5f", delimiter=",")
| true |
5cf6c86218a8b87122f3ed1433f21a1bf3c2f11e | Python | mighty1231/stamina | /data.py | UTF-8 | 5,159 | 3.21875 | 3 | [] | no_license |
class Data:
    """Labelled sample strings loaded from a whitespace-separated text file.

    Each input line is ``<label> <int> <int> ...`` where the label is '+'
    (positive) or '-' (negative). Samples are stored as ``bytes``, duplicates
    are dropped, and ``alphabet_size`` is one past the largest symbol seen.
    """

    def __init__(self, fname):
        positives = []
        negatives = []
        max_symbol_plus_one = -1  # symbols range over 0 .. alphabet_size-1

        with open(fname, 'rt') as handle:
            for line in handle.readlines():
                tokens = line.split(' ')
                # A trailing bare-newline token appears when the line ends
                # with " \n"; drop it before the int conversion.
                if tokens[-1] == '\n':
                    tokens = tokens[:-1]

                # Track the largest alphabet symbol encountered.
                sample = bytes(map(int, tokens[1:]))
                for symbol in sample:
                    max_symbol_plus_one = max(max_symbol_plus_one, symbol + 1)

                label = tokens[0]
                if label == '+':
                    if sample not in positives:  # keep samples unique
                        positives.append(sample)
                elif label == '-':
                    if sample not in negatives:  # keep samples unique
                        negatives.append(sample)
                else:
                    raise RuntimeError('Error on parsing %s:%s' % (fname, line))

        self.pos = positives
        self.neg = negatives
        self.alphabet_size = max_symbol_plus_one
class PTA:
    """Prefix Tree Acceptor: a trie over integer symbols.

    Each node carries ``dict`` (symbol -> child PTA node) and
    ``acceptance``: True if a positive sample ends at this node, False if a
    negative one does, None if no sample ends here.
    """
    # prefix
    # transition : PTA.DICT = dictionary transition_int->to_pta
    def __init__(self, isPositive = None):
        self.dict = dict()
        self.acceptance = isPositive
    def register(self, string, isPositive):
        """Insert ``string`` (a list of symbols, consumed destructively via
        pop) and mark its final node with ``isPositive``.

        Raises AssertionError if the same string was registered before with
        the opposite label.
        """
        if string == []:
            if self.acceptance == None:
                self.acceptance = isPositive
            else:
                assert self.acceptance == isPositive
            return
        c = string.pop(0)
        if c in self.dict:
            self.dict[c].register(string, isPositive)
        else:
            pta = PTA()
            pta.register(string, isPositive)
            self.dict[c] = pta
    @staticmethod
    def fromTXT(fname):
        """Build a PTA from a sample file parsed by :class:`Data`."""
        data = Data(fname)
        pta = PTA()
        for string in data.pos:
            pta.register(list(string), True)
        for string in data.neg:
            pta.register(list(string), False)
        return pta
    # return the number of nodes
    def __len__(self):
        # BFS
        todo = [self]
        cnt = 0
        while todo:
            node = todo.pop(0)
            cnt += 1
            for c in node.dict:
                todo.append(node.dict[c])
        return cnt
    def makeViz(self):
        """Render the tree as Graphviz dot text.

        Nodes are numbered in BFS order starting at 0 (the root); accepting
        and rejecting states get distinct fill colors.
        """
        edges = [] # fromState, toState, alphabet
        positiveStates = []
        negativeStates = []
        # BFS
        todo = [(self, 0)]
        node_cnt = 1
        while todo:
            node, idx = todo.pop(0)
            if node.acceptance == True:
                positiveStates.append(idx)
            elif node.acceptance == False:
                negativeStates.append(idx)
            for char in node.dict:
                edges.append((idx, node_cnt, char))
                todo.append((node.dict[char], node_cnt))
                node_cnt += 1
        posColor = "0.408 0.498 1.000"
        negColor = "0.000 1.000 1.000"
        string = 'digraph fsm {\n\tnode [style=filled];\n'
        for fromState, toState, alphabet in edges:
            string += '\t%d -> %d [ label = "%s" ];\n' % (fromState, toState, alphabet)
        for state in positiveStates:
            string += '\t%d [ color="%s" ];\n' % (state, posColor)
        for state in negativeStates:
            string += '\t%d [ color="%s" ];\n' % (state, negColor)
        string += '}\n'
        return string
    def makeSimpleViz(self):
        """Like :meth:`makeViz`, but chains of pass-through nodes (exactly
        one incoming and one outgoing edge, neither accepting nor rejecting)
        are collapsed into a single edge labeled with the list of symbols
        along the chain.
        """
        edges = [] # fromState, toState, alphabet
        positiveStates = []
        negativeStates = []
        # BFS
        todo = [(self, 0)]
        node_cnt = 1
        while todo:
            node, idx = todo.pop(0)
            if node.acceptance == True:
                positiveStates.append(idx)
            elif node.acceptance == False:
                negativeStates.append(idx)
            for char in node.dict:
                edges.append((idx, node_cnt, char))
                todo.append((node.dict[char], node_cnt))
                node_cnt += 1
        # check nodes to simplify
        # prevState[i]/nextState[i] hold the unique (neighbor, symbol) pair,
        # or -1 once a second edge makes node i non-collapsible.
        prevState = [None for _ in range(node_cnt)]
        nextState = [None for _ in range(node_cnt)]
        for fromState, toState, alphabet in edges:
            if nextState[fromState] == None:
                nextState[fromState] = (toState, alphabet)
            else:
                nextState[fromState] = -1 # more than one item
            if prevState[toState] == None:
                prevState[toState] = (fromState, alphabet)
            else:
                prevState[toState] = -1 # more than one item
        def isSimplifiable(node):
            return node not in positiveStates and node not in negativeStates and \
                isinstance(prevState[node], tuple) and isinstance(nextState[node], tuple)
        for curNode in range(node_cnt):
            if isSimplifiable(curNode):
                _prev, a1 = prevState[curNode]
                _next, a2 = nextState[curNode]
                # here `string` is the list of symbols along the collapsed
                # chain (the same name is reused below for the dot output)
                string = [a1, a2]
                edges.remove((_prev, curNode, a1))
                edges.remove((curNode, _next, a2))
                while isSimplifiable(_next):
                    _nextofnext, aa = nextState[_next]
                    string.append(aa)
                    prevState[_next] = None # prevent duplicate search
                    edges.remove((_next, _nextofnext, aa))
                    _next = _nextofnext
                edges.append((_prev, _next, string))
        # render
        posColor = "0.408 0.498 1.000"
        negColor = "0.000 1.000 1.000"
        string = 'digraph fsm {\n\tnode [style=filled];\n'
        for fromState, toState, alphabet in edges:
            string += '\t%d -> %d [ label = "%s" ];\n' % (fromState, toState, alphabet)
        for state in positiveStates:
            string += '\t%d [ color="%s" ];\n' % (state, posColor)
        for state in negativeStates:
            string += '\t%d [ color="%s" ];\n' % (state, negColor)
        string += '}\n'
        return string
class Individual:
    """Candidate solution: a list of state pairs proposed for merging."""

    def __init__(self, basis=None):
        """
        :param basis: optional initial list of merge pairs; a fresh list is
            created when omitted.

        Bug fix: the original default ``basis=[]`` was a mutable default
        argument, so every Individual created without an argument shared
        one and the same list.
        """
        # self.merge_basis = [(state1, state2), (state1', state2'), (state4, state5)]
        self.merge_basis = [] if basis is None else basis
if __name__ == "__main__":
    # Ad-hoc driver: print the node count of the PTA built from each of the
    # 100 Stamina training files (paths are relative to the repo layout).
    # pta = PTA.fromTXT("../grid/28_training.txt")
    # print(pta.makeSimpleViz())
    def getLengthOfPTA():
        for i in range(1, 101):
            pta = PTA.fromTXT("../grid/%d_training.txt" % i)
            print(len(pta))
getLengthOfPTA() | true |
d76d50e69f2a98ae5975aca357519057e450706b | Python | ehauckdo/marioGraph | /helper/reachability.py | UTF-8 | 1,067 | 3.03125 | 3 | [] | no_license | import logging, inspect
logger = logging.getLogger(__name__)
def is_reachable(p1, p2, n, dist=4):
    """Return True when node ``n`` lies in the reachable region spanned by
    the two endpoints ``p1`` and ``p2``.

    The region is the union of a triangle of half-width ``dist`` hanging
    under each endpoint and the axis-aligned band of height ``dist`` above
    the segment (from ``p1.x`` to ``p2.x``; smaller ``y`` is higher).
    Points must expose ``x``/``y`` attributes.
    """
    logger.debug("\t(CALL) {}".format(inspect.stack()[0][3]))
    def triangle_area(ax, ay, bx, by, cx, cy):
        # absolute value of the shoelace formula
        return abs((ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) / 2.0)
    def inside_triangle(apex, point, reach):
        # Triangle with a horizontal base of width 2*reach at the apex's
        # height and its tip `reach` units above it (y - reach).
        lx, ly = apex.x - reach, apex.y
        rx, ry = apex.x + reach, apex.y
        tx, ty = apex.x, apex.y - reach
        whole = triangle_area(lx, ly, rx, ry, tx, ty)
        part_a = triangle_area(point.x, point.y, rx, ry, tx, ty)
        part_b = triangle_area(lx, ly, point.x, point.y, tx, ty)
        part_c = triangle_area(lx, ly, rx, ry, point.x, point.y)
        # inside iff the three sub-triangles exactly tile the whole one
        return abs(part_a + part_b + part_c - whole) <= 0.001
    def inside_rectangle(left_node, right_node, point, reach):
        within_x = left_node.x <= point.x <= right_node.x
        within_y = left_node.y - reach <= point.y <= left_node.y
        return within_x and within_y
    is_inside = (inside_triangle(p1, n, dist)
                 or inside_triangle(p2, n, dist)
                 or inside_rectangle(p1, p2, n, dist))
    logger.debug("\t(RTRN) {}".format(inspect.stack()[0][3]))
    return is_inside
b394ca68f5fff753311b909b4dc3de458492f7e8 | Python | uetiko/Algoritmos | /abarrientos/algoritmos.py | UTF-8 | 1,130 | 3.640625 | 4 | [] | no_license | class Ordenamiento(object):
aux = None
listaNoOrdenada = None
sizeList = None
    def __init__(self):
        # Start with an empty list; crearLista() fills it from user input.
        self.aux = 0
        self.listaNoOrdenada = list()
        self.sizeList = 0
    def crearLista(self):
        """Read the list length and each element from stdin and update
        ``sizeList``.

        NOTE: uses Python 2 ``raw_input``; prompts are in Spanish.
        """
        lengthList = int(raw_input('Cuantos elementos tendra su lista?'))
        for index in range(0, lengthList):
            element = 0
            element = int(raw_input(
                "Ingrese el elemento numero {}".format(index)
            ))
            self.listaNoOrdenada.append(element)
        self.sizeList = len(self.listaNoOrdenada)
    def imprimeLista(self):
        """Print each element of the list, one per line."""
        for valor in self.listaNoOrdenada:
            print("valor: {}".format(valor))
def ordenamientoBurbuja(self):
for index1 in range(self.listaNoOrdenada - 1, 1):
for index2 in range(1, index1):
if self.listaNoOrdenada[index2] > self.listaNoOrdenada[index2 +1 ]:
self.aux = self.listaNoOrdenada[index2]
self.listaNoOrdenada[index2] = self.listaNoOrdenada[index2 + 1]
self.listaNoOrdenada[index2 + 1] = self.aux
| true |
a07145efc692d3aa58b4ff213404890ef2e8ba9d | Python | gorilik324/ctax | /src/BalanceQueue.py | UTF-8 | 4,616 | 3.296875 | 3 | [
"MIT"
] | permissive | from collections import deque, defaultdict
from decimal import Decimal
from enum import Enum
from functools import reduce
from src.NumberUtils import currency_to_string
from src.bo.SellInfo import SellInfo
from src.bo.Transaction import TransactionType
class QueueType(Enum):
    """
    Type of queue: selects which past purchase a sale consumes first.
    """
    FIFO = 0  # first in, first out: oldest purchase is consumed first
    LIFO = 1  # last in, first out: newest purchase is consumed first
class BalanceQueue:
    """
    Queue that keeps a tab of amounts bought, their price, and the current balance.
    When an amount is sold, its cost and buying fees are calculated by FIFO (default) or LIFO principle.
    """

    def __init__(self, queue_type, fees_are_tax_deductible):
        """
        :param queue_type: QueueType.FIFO or QueueType.LIFO
        :param fees_are_tax_deductible: stored flag for tax handling
        """
        self.queue_type = queue_type
        self.fees_are_tax_deductible = fees_are_tax_deductible
        # one deque of Item objects per currency, created on demand
        self.queues = defaultdict(lambda: deque())

    def get_balance(self, currency):
        """
        Returns the balance for the specified currency.
        """
        return reduce(lambda a, b: a + b.amount, self.queues[currency], Decimal(0))

    def trade(self, trade):
        """
        Simulates a trade.
        :return: a SellInfo object with information about the selling part of the trade
        """
        sell = trade.get_transaction(TransactionType.SELL)
        sell_info = self._sell(self.queues[sell.currency], trade)
        buy = trade.get_transaction(TransactionType.BUY)
        self._buy(self.queues[buy.currency], trade)
        return sell_info

    def _buy(self, queue, trade):
        # Record the bought amount together with the trade it came from.
        self._put(queue, Item(trade.get_transaction(TransactionType.BUY).amount, trade))

    def _sell(self, queue, trade):
        """
        Consume past purchases to cover the sell amount; any uncovered
        remainder is recorded as an unaccounted Item (trade=None).
        """
        remaining_sell_amount = trade.get_transaction(TransactionType.SELL).amount
        items_bought = []
        while remaining_sell_amount > Decimal('0'):
            if self._is_empty(queue):  # no bought items left but sell is not fully covered
                items_bought.append(Item(remaining_sell_amount, None))
                break
            item = self._pop(queue, self.queue_type)
            if remaining_sell_amount < item.amount:  # sell amount is entirely covered by bought items
                items_bought.append(Item(remaining_sell_amount, item.trade))
                item.amount -= remaining_sell_amount
                self._put_back(queue, self.queue_type, item)
                break
            elif remaining_sell_amount >= item.amount:  # bought item is fully consumed by sell
                items_bought.append(item)
                remaining_sell_amount -= item.amount
        return SellInfo(trade, items_bought)

    @staticmethod
    def _is_empty(queue):
        return len(queue) == 0

    @staticmethod
    def _pop(queue, queue_type):
        # FIFO takes the oldest purchase, LIFO the most recent one.
        if queue_type == QueueType.FIFO:
            item = queue.popleft()
        else:
            item = queue.pop()
        return item

    @staticmethod
    def _put_back(queue, queue_type, item):
        # Return a partially consumed item to the end it was taken from.
        if queue_type == QueueType.FIFO:
            queue.appendleft(item)
        else:
            queue.append(item)

    @staticmethod
    def _put(queue, item):
        queue.append(item)

    def __str__(self) -> str:
        amounts = []
        for currency in self.queues:
            # Bug fix: the string literal 'currency' was previously passed to
            # get_balance, so every entry showed the balance of a key named
            # "currency" (always 0) instead of the actual currency's balance.
            amounts.append(currency_to_string(self.get_balance(currency), currency))
        return f'{amounts}'
class Item:
    """
    Represents a percentage of an amount bought in the past, and the corresponding trade.
    """
    def __init__(self, amount, trade):
        """
        :param amount: the amount of currency this item covers
        :param trade: corresponding trade or None if unaccounted
        """
        self.amount = amount
        self.trade = trade
    @property
    def percent(self):
        """
        How much of the trade amount this item represents, in percent (0.0 - 1.0).
        """
        if self.trade is None: # no trade means unaccounted
            return Decimal('1.0') # 100 percent of unaccounted trade are relevant
        return self.amount / self.trade.get_transaction(TransactionType.BUY).amount
    @property
    def cost(self):
        """
        The cost of the item (converted to tax currency).
        """
        if self.trade is None: # no trade means unaccounted
            return Decimal('0.0') # unaccounted means no cost
        return self.percent * self.trade.get_transaction(TransactionType.SELL).converted_amount
    @property
    def fee(self):
        """
        The fee of the item (converted to tax currency).
        """
        if self.trade is None: # no trade means unaccounted
            return Decimal('0.0') # unaccounted means no fee
        return self.percent * self.trade.get_transaction(TransactionType.FEE).converted_amount
| true |
33d664698aedf51ae0d93815dcb3cf414c7f9be8 | Python | zh414/python-core | /8/excise8-8.py | UTF-8 | 122 | 3.109375 | 3 | [] | no_license | #jie cheng
def jie(n):
    # Compute and print n! ("jie cheng" = factorial in Chinese pinyin).
    # NOTE: Python 2 syntax — `print n,'! = ',` is a print statement whose
    # trailing comma keeps the result on the same line.
    s=1
    print n,'! = ',
    # multiply n * (n-1) * ... * 1
    while n >= 1:
        s = s*n
        n = n-1
    print s
jie(4) | true |
38c29c4c06f932c1ffaf39beaa977d157a200e6b | Python | GeekStudioHIT/PythonHack | /Python3Test/re/ReTest.py | UTF-8 | 456 | 2.90625 | 3 | [] | no_license | import re
# m = re.match('foo', 'foo')
# m = re.match('foo', 'seafood')
# m = re.search('foo', 'seafood')
# m = re.match('.abc', ' abc')
# if m is not None:
# print(m.group())
# pattern = '\w+@\w+\.com'
# print(re.match(pattern, 'nobody@gmail.com').group())
# pattern = '\w+@(\w+\.)?\w+\.com'
# print(re.match(pattern, 'nobody@www.gmail.com').group())
# print(re.match(pattern, 'nobody@gmail.com').group())
# print(re.search('cc', 'ccc').group())
# print(re.findall('cc', 'ccc')) | true |
9ae42c09ebb7728aa35e1b89801805a808a7e31b | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_155/1114.py | UTF-8 | 335 | 3.296875 | 3 | [] | no_license | t = int(raw_input())
x = 1  # current test-case number
# For each test case (this appears to be Code Jam's "Standing Ovation"):
# s[i] is the number of audience members with shyness level i.  Greedily
# invite friends of shyness 0 whenever fewer than i people are already
# standing when level i is reached.  Python 2 (raw_input/xrange/print).
for _ in xrange(t):
    smax, s = raw_input().split(" ")
    smax = int(smax)  # maximum shyness level (len(s) carries the same info)
    y = 0      # friends invited so far (the answer)
    total = 0  # people standing so far
    for i, si in enumerate([int(i) for i in s]):
        if i >= total:
            # not enough people standing to trigger shyness level i:
            # add exactly the missing number of friends
            y += (i - total)
            total += (i - total)
        total += si
    print "Case #{}: {}".format(x, y)
    x += 1
| true |
6439e67095443b418268f89da89acf94103fccd0 | Python | Aasthaengg/IBMdataset | /Python_codes/p03239/s748239513.py | UTF-8 | 175 | 2.546875 | 3 | [] | no_license | N,T=map(int,input().split())
cost=10**9  # sentinel: "no route within the time limit found yet"
# Among the N (cost c, time t) options, keep the cheapest whose time fits
# within the limit T; report "TLE" if none qualifies.
for _ in range(N):
    c,t=map(int,input().split())
    if t<=T:
        cost=min(cost,c)
ans=cost if cost!=10**9 else "TLE"
print(ans) | true |
1b8f1c4583aa178d129c88a4f69c541a6d3433f7 | Python | jonkoi/QNN-Evaluation | /nnUtils_ABC.py | UTF-8 | 19,898 | 2.734375 | 3 | [] | no_license | import tensorflow as tf
import math
from tensorflow.python.training import moving_averages
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.framework import ops
def binarize(x):
    """
    Clip and binarize tensor using the straight through estimator (STE) for the gradient.
    """
    g = tf.get_default_graph()
    with ops.name_scope("Binarized") as name:
        # Overriding Sign's gradient with Identity implements the STE: the
        # forward pass emits {-1, +1}, the backward pass lets gradients flow
        # through unchanged (the preceding clip bounds them to [-1, 1]).
        with g.gradient_override_map({"Sign": "Identity"}):
            x=tf.clip_by_value(x,-1,1)
            return tf.sign(x)
def get_mean_stddev(input_tensor):
    """Return ``(mean, stddev)`` computed over every axis of ``input_tensor``."""
    with tf.name_scope('mean_stddev_cal'):
        mean, variance = tf.nn.moments(input_tensor, axes=list(range(len(input_tensor.get_shape()))))
        stddev = tf.sqrt(variance, name="standard_deviation")
        return mean, stddev
# TODO: Allow shift parameters to be learnable
def get_shifted_stddev(stddev, no_filters):
    """Return ``no_filters`` shift offsets evenly spaced in
    ``[-stddev, +stddev]`` — the fixed u_i offsets consumed by
    :func:`get_binary_filters`.
    """
    with tf.name_scope('shifted_stddev'):
        spreaded_deviation = -1. + (2./(no_filters - 1)) * tf.convert_to_tensor(list(range(no_filters)),
                                                                                dtype=tf.float32)
        return spreaded_deviation * stddev
def get_binary_filters(convolution_filters, no_filters, name=None):
    """Return ``no_filters`` binary filter bases, each computed as
    ``sign((W - mean(W)) + u_i * stddev(W))`` with an STE gradient.

    The result is stacked along a new leading axis of size ``no_filters``.
    """
    with tf.name_scope(name, default_name="get_binary_filters"):
        mean, stddev = get_mean_stddev(convolution_filters)
        shifted_stddev = get_shifted_stddev(stddev, no_filters)
        # Normalize the filters by subtracting mean from them
        mean_adjusted_filters = convolution_filters - mean
        # Tiling filters to match the number of filters
        expanded_filters = tf.expand_dims(mean_adjusted_filters, axis=0, name="expanded_filters")
        tiled_filters = tf.tile(expanded_filters, [no_filters] + [1] * len(convolution_filters.get_shape()),
                                name="tiled_filters")
        # Similarly tiling spreaded stddev to match the shape of tiled_filters
        expanded_stddev = tf.reshape(shifted_stddev, [no_filters] + [1] * len(convolution_filters.get_shape()),
                                     name="expanded_stddev")
        with tf.get_default_graph().gradient_override_map({"Sign": "Identity"}):
            binarized_filters = tf.sign(tiled_filters + expanded_stddev, name="binarized_filters")
        return binarized_filters
def alpha_training(convolution_filters, binary_filters, alphas, no_filters):
    """Build ``(training_op, loss)`` that fit ``alphas`` so the weighted sum
    of the flattened binary bases approximates the real-valued filters in
    the least-squares sense (Adam).  Only ``alphas`` is updated
    (``var_list=[alphas]``); callers pass gradient-stopped filter tensors.
    """
    with tf.name_scope("alpha_training"):
        reshaped_convolution_filters = tf.reshape(convolution_filters, [-1], name="reshaped_convolution_filters")
        reshaped_binary_filters = tf.reshape(binary_filters, [no_filters, -1],
                                             name="reshaped_binary_filters")
        weighted_sum_filters = tf.reduce_sum(tf.multiply(alphas, reshaped_binary_filters),
                                             axis=0, name="weighted_sum_filters")
        # Defining loss
        error = tf.square(reshaped_convolution_filters - weighted_sum_filters, name="alphas_error")
        loss = tf.reduce_mean(error, axis=0, name="alphas_loss")
        # Defining optimizer
        training_op = tf.train.AdamOptimizer().minimize(loss, var_list=[alphas],
                                                        name="alphas_training_op")
        return training_op, loss
def ApproxConvLayer(input_tensor, binary_filters, alphas, no_filters, strides=(1, 1), padding="SAME", name=None):
    """Approximate convolution: convolve the input with each binary base and
    sum the per-base results weighted by ``alphas``.
    """
    with tf.name_scope(name, "ApproxConv_Layer"):
        # Reshaping alphas to match the input tensor
        reshaped_alphas = tf.reshape(alphas,
                                     shape=[no_filters] + [1] * len(input_tensor.get_shape()),
                                     name="reshaped_alphas")
        # Calculating convolution for each binary filter
        approxConv_outputs = []
        for index in range(no_filters):
            # Binary convolution
            this_conv = tf.nn.conv2d(input_tensor, binary_filters[index],
                                     strides=(1,) + strides + (1,),
                                     padding=padding)
            approxConv_outputs.append(this_conv)
        conv_outputs = tf.convert_to_tensor(approxConv_outputs, dtype=tf.float32,
                                            name="conv_outputs")
        # Summing up each of the binary convolution
        ApproxConv_output = tf.reduce_sum(tf.multiply(conv_outputs, reshaped_alphas), axis=0)
        return ApproxConv_output
def ABCSpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
                padding='VALID', bias=True, reuse=None, name='BinarizedSpatialConvolution', no_filters_conv=5):
    """ABC-Net convolution layer factory.

    The inner layer binarizes the activations with :func:`binarize` and
    approximates the real-valued weights with ``no_filters_conv`` binary
    bases; it returns ``(output, alpha_training_op)``.

    NOTE(review): ``dW``/``dH``/``padding`` are not forwarded to
    :func:`ApproxConvLayer` (which therefore uses stride 1 and SAME
    padding) — confirm whether that is intended.
    """
    def abc_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            alphas_conv = tf.Variable(tf.random_normal(shape=(no_filters_conv, 1), mean=1.0, stddev=0.1),dtype=tf.float32, name="alphas_conv")
            binary_filters_conv = get_binary_filters(w, no_filters_conv)
            alpha_training_conv, alpha_loss_conv = alpha_training(tf.stop_gradient(w, "no_gradient_W_conv"),
                tf.stop_gradient(binary_filters_conv, "no_gradient_binary_filters_conv"), alphas_conv, no_filters_conv)
            # bin_w = binarize(w)
            bin_x = binarize(x)
            ApproxLayer = ApproxConvLayer(bin_x, binary_filters_conv, alphas_conv, no_filters_conv)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                ApproxLayer = tf.nn.bias_add(ApproxLayer, b)
            # for i in range(no_filters_conv):
            #
            #     tf.summary.histogram(name + '_bWeights_' + str(i), binary_filters_conv[i, :])
            # tf.summary.histogram(name + '_bActivation_', bin_x)
            return ApproxLayer, alpha_training_conv
    return abc_conv2d
def ABCSpatialConvolutionFirst(nOutputPlane, kW, kH, dW=1, dH=1,
                padding='VALID', bias=True, reuse=None, name='BinarizedSpatialConvolution', no_filters_conv=5):
    """Same as :func:`ABCSpatialConvolution`, but the input is NOT
    binarized — intended for the first layer so raw images keep full
    precision.  Returns ``(output, alpha_training_op)``.

    NOTE(review): as in ABCSpatialConvolution, ``dW``/``dH``/``padding``
    are not forwarded to :func:`ApproxConvLayer`.
    """
    def abc_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            alphas_conv = tf.Variable(tf.random_normal(shape=(no_filters_conv, 1), mean=1.0, stddev=0.1),dtype=tf.float32, name="alphas_conv")
            binary_filters_conv = get_binary_filters(w, no_filters_conv)
            alpha_training_conv, alpha_loss_conv = alpha_training(tf.stop_gradient(w, "no_gradient_W_conv"),
                tf.stop_gradient(binary_filters_conv, "no_gradient_binary_filters_conv"), alphas_conv, no_filters_conv)
            # bin_w = binarize(w)
            ApproxLayer = ApproxConvLayer(x, binary_filters_conv, alphas_conv, no_filters_conv)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                ApproxLayer = tf.nn.bias_add(ApproxLayer, b)
            # for i in range(no_filters_conv):
            #
            #     tf.summary.histogram(name + '_bWeights_' + str(i), binary_filters_conv[i, :])
            # tf.summary.histogram(name + '_bActivation_', bin_x)
            return ApproxLayer, alpha_training_conv
    return abc_conv2d
def BinarizedWeightOnlySpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
                padding='VALID', bias=True, reuse=None, name='BinarizedWeightOnlySpatialConvolution'):
    '''
    This function is used only at the first layer of the model as we dont want to binarized the RGB images
    '''
    def bc_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            # Only the weights are binarized (STE via binarize); the input
            # keeps full precision.
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bin_w = binarize(w)
            out = tf.nn.conv2d(x, bin_w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return bc_conv2d
def SpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
                padding='VALID', bias=True, reuse=None, name='SpatialConvolution'):
    """Full-precision 2-D convolution layer factory (Xavier-initialized
    weights, optional bias)."""
    def conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer_conv2d())
            out = tf.nn.conv2d(x, w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return conv2d
def Affine(nOutputPlane, bias=True, name=None, reuse=None):
    """Fully connected layer factory; the input is flattened to 2-D
    (batch, features) before the matmul."""
    def affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            reshaped = tf.reshape(x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
            output = tf.matmul(reshaped, w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
            return output
    return affineLayer
def BinarizedAffine(nOutputPlane, bias=True, name=None, reuse=None):
    """Fully connected layer with both input and weights binarized (STE)."""
    def b_affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            '''
            Note that we use binarized version of the input (bin_x) and the weights (bin_w). Since the binarized function uses STE
            we calculate the gradients using bin_x and bin_w but we update w (the full precition version).
            '''
            bin_x = binarize(x)
            reshaped = tf.reshape(bin_x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
            bin_w = binarize(w)
            output = tf.matmul(reshaped, bin_w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
            return output
    return b_affineLayer
def BinarizedWeightOnlyAffine(nOutputPlane, bias=True, name=None, reuse=None):
    """Fully connected layer with binarized weights only (input keeps full
    precision)."""
    def bwo_affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            '''
            Note that we use binarized version of the input (bin_x) and the weights (bin_w). Since the binarized function uses STE
            we calculate the gradients using bin_x and bin_w but we update w (the full precition version).
            '''
            # NOTE: the string above was copy-pasted from BinarizedAffine —
            # here the input x is NOT binarized, only the weights are.
            reshaped = tf.reshape(x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
            bin_w = binarize(w)
            output = tf.matmul(reshaped, bin_w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
            return output
    return bwo_affineLayer
def Linear(nInputPlane, nOutputPlane):
    """Fully connected layer without a bias term.

    ``nInputPlane`` is accepted for interface compatibility but unused:
    :func:`Affine` infers the input width from the tensor at call time.

    Bug fix: the original called ``Affine(nInputPlane, nOutputPlane,
    add_bias=False)`` — the input width was passed as Affine's output width
    and ``add_bias`` is not a parameter Affine accepts (TypeError on every
    call).
    """
    return Affine(nOutputPlane, bias=False)
def wrapNN(f,*args,**kwargs):
    """Adapt an arbitrary tensor function ``f`` into a layer callable that
    ignores the ``scope``/``is_training`` layer-protocol arguments."""
    def layer(x, scope='', is_training=True):
        return f(x,*args,**kwargs)
    return layer
def Dropout(p, name='Dropout'):
    """Dropout layer factory (``p`` is tf.nn.dropout's TF1 keep
    probability).  ``is_training`` is evaluated at graph-construction time
    as a plain Python bool — hence the commented-out tf.cond variant."""
    def dropout_layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            # def drop(): return tf.nn.dropout(x,p)
            # def no_drop(): return x
            # return tf.cond(is_training, drop, no_drop)
            if is_training:
                return tf.nn.dropout(x,p)
            else:
                return x
    return dropout_layer
def ReLU(name='ReLU'):
    """ReLU activation layer factory."""
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.nn.relu(x)
    return layer
def HardTanh(name='HardTanh'):
    """Hard-tanh activation layer factory: clips values to [-1, 1]."""
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.clip_by_value(x,-1,1)
    return layer
def View(shape, name='View'):
    """Return a layer that reshapes its input to ``shape``.

    ``name`` is kept for interface compatibility.

    Bug fix: the original evaluated ``tf.variable_op_scope([x], None, name,
    reuse=reuse)`` at factory scope, where neither ``x`` nor ``reuse``
    exists, so every call raised NameError.
    """
    return wrapNN(tf.reshape, shape=shape)
def SpatialMaxPooling(kW, kH=None, dW=None, dH=None, padding='VALID',
                      name='SpatialMaxPooling'):
    """Max-pooling layer factory.  kH defaults to kW and the strides
    default to the kernel size (non-overlapping windows); note the ``or``
    defaulting also treats an explicit 0 as "use default"."""
    kH = kH or kW
    dW = dW or kW
    dH = dH or kH
    def max_pool(x,is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.nn.max_pool(x, ksize=[1, kW, kH, 1], strides=[1, dW, dH, 1], padding=padding)
    return max_pool
def SpatialAveragePooling(kW, kH=None, dW=None, dH=None, padding='VALID',
                          name='SpatialAveragePooling'):
    """Average-pooling layer factory; same defaulting rules as
    :func:`SpatialMaxPooling`."""
    kH = kH or kW
    dW = dW or kW
    dH = dH or kH
    def avg_pool(x,is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.nn.avg_pool(x, ksize=[1, kW, kH, 1], strides=[1, dW, dH, 1], padding=padding)
    return avg_pool
def BatchNormalization(*kargs, **kwargs):
    # Thin wrapper exposing tf.contrib.layers.batch_norm as a layer factory.
    return wrapNN(tf.contrib.layers.batch_norm, *kargs, **kwargs)
def Sequential(moduleList):
    """Compose ``moduleList`` into one layer callable.

    The returned ``model(x, is_training)`` applies each module in order and
    returns ``(output, alphas_training_operations)``; the second element
    collects the alpha-fitting ops emitted by ABC layers, which return an
    ``(activation, training_op)`` tuple instead of a bare tensor.
    """
    def model(x, is_training=True):
        # Create model
        output = x
        alphas_training_operations = []
        #with tf.variable_op_scope([x], None, name):
        for i, m in enumerate(moduleList):
            result = m(output, is_training=is_training)
            if isinstance(result, tuple):
                # Bug fix: the original overwrote the tuple with its first
                # element *before* reading index 1, so it appended a slice
                # of the activation tensor instead of the alpha training op.
                output = result[0]
                alphas_training_operations.append(result[1])
            else:
                output = result
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, output)
        return output, alphas_training_operations
    return model
def Concat(moduleList, dim=3, reuse=None):
    """Apply every module in ``moduleList`` to the same input and
    concatenate their outputs along axis ``dim``.

    Bug fixes: the original assigned ``outputs[i]`` into an empty list
    (IndexError on the first module) and referenced an undefined ``reuse``
    name; ``reuse`` is now an explicit parameter (default None, backward
    compatible).  ``tf.concat(dim, values)`` keeps the pre-1.0 argument
    order used throughout this file.
    """
    def model(x, is_training=True):
        # Create model
        outputs = []
        for i, m in enumerate(moduleList):
            name = 'layer_' + str(i)
            with tf.variable_op_scope([x], name, 'Layer', reuse=reuse):
                outputs.append(m(x, is_training=is_training))
        output = tf.concat(dim, outputs)
        return output
    return model
def Residual(moduleList, name='Residual',fixShape=['pad',1,1]):
    """Residual wrapper: y = F(x) + shortcut(x), where F is the Sequential
    composition of ``moduleList``.

    NOTE(review): ``Sequential(...)``'s model returns an
    ``(output, ops)`` tuple, so ``output.get_shape()`` below would fail on
    a tuple — confirm how moduleList outputs are meant to be unwrapped.
    NOTE(review): ``fixShape`` is a mutable default argument; safe only
    while it is never mutated (it is only read here).
    """
    #fixShape:fixShape if input filters !=output filters
    #params=['pad' or 'conv',stride,stride]
    #'pad' or 'conv':fixshape method:conv1x1 or pooling1x1+padiing;
    #stride:stride for pooling
    m = Sequential(moduleList)
    def model(x, is_training=True):
        # Create model
        with tf.variable_op_scope([x], None, name):
            output=m(x,is_training=is_training)
            with tf.variable_op_scope(None, 'fixShape', reuse=None):
                filterIn=x.get_shape()[3]
                filterOut=output.get_shape()[3]
                if filterIn !=filterOut:
                    # Channel mismatch: adapt the shortcut either by
                    # avg-pool + zero padding ('pad') or by a 1x1 conv.
                    if fixShape[0]=='pad':
                        x=tf.nn.avg_pool(x, ksize=[1, 1, 1, 1], strides=[1, fixShape[1],fixShape[2],1],padding='VALID')
                        x=tf.pad(x,[[0, 0], [0, 0], [0, 0],[(filterOut-filterIn)//2, (filterOut-filterIn)//2]])
                    else:#conv method
                        w = tf.get_variable('weight', [1, 1, filterIn, filterOut],
                                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
                        x = tf.nn.conv2d(x, w, strides=[1, fixShape[1],fixShape[2], 1], padding='SAME')
            output = tf.add(output, x)
            return output
    return model
def Residual_func(nOutputPlane, kW, kH, dW=1, dH=1,
                  padding='VALID', bias=True, name='Residual_func',reuse=None,fixShapeMethod='pad',type='basic',bottleWidth=2):
    """Return the module list for one residual unit of the requested
    ``type``: 'basic', 'pre', 'bottleneck', 'dropout' or 'prebottleneck'.
    Pre-activation variants ('pre...') omit the trailing ReLU.

    NOTE(review): ``type`` shadows the builtin; an unrecognized value
    leaves ``curr_layers`` unbound and raises NameError at call time.
    """
    with tf.variable_op_scope(None,None, name, reuse=reuse):
        if type=='basic':
            curr_layers = [
                SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane,kW,kH,1,1, padding=padding,bias=bias),
                BatchNormalization()
            ]
        elif type=='pre':
            curr_layers = [
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane,kW,kH,1,1, padding=padding,bias=bias)
            ]
        elif type=='bottleneck':
            curr_layers = [
                SpatialConvolution(nOutputPlane,1,1,1,1, padding='valid',bias=bias),
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane*bottleWidth,1,1,1,1, padding='valid',bias=bias),
                BatchNormalization()
            ]
        # second, independent chain handles the remaining variants
        if type=='dropout':
            curr_layers = [
                SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                ReLU(),
                Dropout(0.5),
                SpatialConvolution(nOutputPlane,kW,kH,1,1, padding=padding,bias=bias)
            ]
        elif type=='prebottleneck':
            curr_layers = [
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane,1,1,1,1, padding='valid',bias=bias),
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                BatchNormalization(),
                ReLU(),
                SpatialConvolution(nOutputPlane*bottleWidth,1,1,1,1, padding='valid',bias=bias)
            ]
        modules = []
        # pre-activation units skip the post-addition ReLU
        if 'pre' in type:
            modules=[Residual(curr_layers,fixShape=[fixShapeMethod,dW,dH])]
        else:
            modules=[Residual(curr_layers,fixShape=[fixShapeMethod,dW,dH])]+[ReLU()]
        return modules
def Block(nOutputPlane, kW, kH, dW=1, dH=1,K=10,N=4,padding='VALID', bias=True, name='Block',reuse=None,
          fixShapeMethod='pad',bottleWidth=2):# K:Network Width;N:GroupNum
    """Stack ``N`` residual units of width ``nOutputPlane*K``; only the
    first unit applies the (dW, dH) stride, the rest use stride 1.

    NOTE: uses Python 2 ``xrange``; replace with ``range`` under Python 3.
    """
    def model(x, is_training=True):
        with tf.variable_op_scope([x],None,name,reuse=reuse):
            modules = []
            for i in xrange(0,N):
                if i==0:
                    modules +=Residual_func(nOutputPlane*K,kW,kH,dW,dH,padding=padding,bias=bias,
                                            reuse=reuse,fixShapeMethod=fixShapeMethod,bottleWidth=bottleWidth)
                else:
                    modules += Residual_func(nOutputPlane*K,kW,kH,1,1,padding=padding,bias=bias,
                                             reuse=reuse,fixShapeMethod=fixShapeMethod,bottleWidth=bottleWidth)
            m=Sequential(modules)
            output=m(x,is_training=is_training)
            return output
    return model
| true |
2a32938b66771087a974f5f3037ede1908537c34 | Python | nikky4D/Zero-Shot-Detection-via-Vision-and-Language-Knowledge-Distillation | /modules/load_data.py | UTF-8 | 2,958 | 3.015625 | 3 | [] | no_license | import os
import pickle
import numpy as np
from numpy.core.defchararray import array, decode
def load_feature(feature_path, label_path):
    r"""Combine ResNet101 features with their class labels.

    Loads the feature matrix and the label column, prepends each sample's
    label as column 0 of its row, caches the combined array to
    ``feature.npy`` (so later runs can use :func:`load_featureNpy`), and
    returns it.

    Parameter
    --------
    feature_path: path to feature.txt, one feature vector per line
    label_path: per-sample class labels

    Returns
    --------
    ndarray of shape (n_samples, 1 + n_features); column 0 is the label
    """
    features = np.loadtxt(feature_path)
    labels = np.loadtxt(label_path).reshape((features.shape[0], 1))
    combined = np.concatenate((labels, features), axis=1)
    print(combined[2, 0:])  # debug peek at the third row
    np.save("feature.npy", combined)
    return combined
def load_class(class_path):
    r"""
    Load class names and cut the leading class-number column.

    Parameter
    --------
    class_path: path to classes.txt, one "<number> <name>" pair per line

    Returns
    --------
    class_array: a 1-D ndarray containing all the class names
    """
    class_array = np.loadtxt(class_path, dtype=str)
    class_array = np.delete(class_array, 0, 1)
    # reshape(-1) flattens to however many classes the file holds instead of
    # hard-coding the AwA2 count of 50 (identical result for 50-class files).
    return class_array.reshape(-1)
def load_attribute(attribute_path):
    r"""
    Load the per-class attribute matrix: one row per class, one column per
    attribute (e.g. striped, white, ...).

    Parameter
    --------
    attribute_path: path to attributes.txt

    Returns
    --------
    ndarray holding one attribute vector per class
    """
    return np.loadtxt(attribute_path)
def load_featureNpy(npy_path):
    r"""
    Load the cached feature array (saved by load_feature) at high speed.

    Returns
    --------
    sum_array: a combination of features and labels; the first value of
    each row is the label number of the class.
    """
    return np.load(npy_path)
def load_attriName(attriName_path):
    r"""
    Load attribute names, dropping the leading index column.

    Returns
    --------
    a 1-D ndarray of attribute names
    """
    attriName_array = np.loadtxt(attriName_path, dtype=str)
    attriName_array = np.delete(attriName_array, 0, 1)
    # reshape(-1) keeps however many attributes the file holds instead of
    # hard-coding the AwA2 count of 85 (identical result for 85-row files).
    return attriName_array.reshape(-1)
def loadSVM(model_path):
    """Load a pickled (SVM) model from ``model_path``.

    The file is opened in a ``with`` block so the handle is closed
    deterministically; the original left the handle to the garbage
    collector.  NOTE: unpickling is only safe for trusted model files.
    """
    with open(model_path, "rb") as model_file:
        return pickle.load(model_file)
if __name__ == '__main__':
    # Ad-hoc driver: regenerate feature.npy from the AwA2 dataset.
    # The absolute paths are specific to the original author's machine.
    # load_class(
    #     "/home/llrt/文档/Animals_with_Attributes2/classes.txt")
    # load_featureNpy("feature.npy")
    # load_attribute(
    #     "/home/llrt/文档/Animals_with_Attributes2/predicate-matrix-binary.txt")
    # print(load_attriName(
    #     "/home/llrt/文档/Animals_with_Attributes2/predicates.txt"))
    load_feature("/home/llrt/文档/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt","/home/llrt/文档/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt")
| true |
f74c33b7d94c81ec1cadfbb2f495336ad6632715 | Python | kaschaefer/proj5-karaoke | /karaoke/pre.py | UTF-8 | 830 | 3.359375 | 3 | [] | no_license | """
Pre-process POIs from a text file to load onto the map
"""
import logging
logging.basicConfig(format='%(levelname)s:%(message)s',
                    level=logging.INFO)
log = logging.getLogger(__name__)
def process(raw):
    """Parse POI lines of the form ``name,lat,lon``.

    :param raw: iterable of text lines (file object or list of strings)
    :return: list of single-entry dicts ``{name: [lat, lon]}``
    :raises ValueError: for a non-empty, non-comment line with fewer than
        three comma-separated fields

    Blank lines and lines starting with ``#`` are skipped.  Bug fix: the
    original's ``else``/raise branch was unreachable (the preceding
    ``elif len(line) != 0`` was always true there), so malformed lines
    crashed with IndexError instead of the intended ValueError.
    """
    cooked = []
    for line in raw:
        log.debug("Line: {}".format(line))
        line = line.strip()
        if len(line) == 0 or line[0] == "#":
            log.debug("Skipping")
            continue
        parts = line.split(",")
        if len(parts) < 3:
            raise ValueError("Trouble with line: '{}'\n".format(line))
        poi = {parts[0]: [parts[1], parts[2]]}
        cooked.append(poi)
    return cooked
def main():
    # Ad-hoc driver: parse the bundled POI file and print the result.
    # NOTE(review): the file handle is never closed; a with-block would be
    # safer.
    f = open("data/POI.txt")
    parsed = process(f)
    print(parsed)
if __name__ == "__main__":
    main()
| true |
7eb64b66f2321c7a33447d01171a9d4e58be3a15 | Python | seoul-ssafy-class-2-studyclub/GaYoung_SSAFY | /test/line_2020/programming4.py | UTF-8 | 3,367 | 3 | 3 | [] | no_license | from collections import deque
def solution(maze):
    """Placeholder solver: the actual search runs at module level; this
    stub always reports 0."""
    return 0
# BFS over a padded board: a 1-border of walls is added around `maze` so the
# neighbour checks never index out of range.  Queue entries are [x, y, cost].
# NOTE(review): several branch bodies do not match their labels (e.g. the
# "one down + one left" branch pushes [x + 1, y + 1]) and two branches share
# the same condition — these look like copy-paste slips; confirm the intended
# move set.  Also, if the target is unreachable, q empties and popleft raises.
# maze = [[0, 1, 0, 1], [0, 1, 0, 0], [0, 0, 0, 0], [1, 0, 1, 0]]
# maze = [[0, 1, 0, 0, 0, 0], [0, 1, 0, 1, 1, 0], [0, 1, 0, 0, 1, 0], [0, 1, 1, 1, 1, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0]]
# maze = [[0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0]]
maze = [[0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 1, 1], [0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0]]
len_maze = len(maze)
board = [[1]*(len_maze+2) for _ in range(len_maze+2)]
for i in range(len_maze):
    for j in range(len_maze):
        board[i+1][j+1] = maze[i][j]
# print(board)
# [[1, 1, 1, 1, 1, 1],
# [1, 0, 1, 0, 1, 1],
# [1, 0, 1, 0, 0, 1],
# [1, 0, 0, 0, 0, 1],
# [1, 1, 0, 1, 0, 1],
# [1, 1, 1, 1, 1, 1]]
q = deque([[1, 1, 0]])
while True:
    x, y, cnt = q.popleft()
    if x == len_maze and y == len_maze:
        print(cnt)
        break
    if 0<= x-1 < len_maze+2 and 0<= y-1 < len_maze+2 and 0<= x+1 < len_maze+2 and 0<= y+1 < len_maze+2:
        # down
        if board[x + 1][y] == 0 and board[x][y+1] == 1 and board[x+1][y+1] == 1:
            q.append([x + 1, y, cnt + 1])
            print('down')
        # one down + one right
        if board[x + 1][y] == 0 and board[x][y+1] == 1 and board[x+1][y+1] == 0:
            q.append([x+1, y+1, cnt + 2])
            print('down+right')
        # one down + one left
        if board[x][y-1] == 1 and board[x+1][y-1] == 0 and board[x + 1][y] == 0:
            q.append([x + 1, y + 1, cnt + 2])
            print('down+left')
        # one right + one up
        if board[x][y+1] == 0 and board[x-1][y] == 1 and board[x-1][y + 1] == 0:
            q.append([x-1, y + 1, cnt + 2])
            print('right+up')
        # up
        if board[x-1][y] == 0 and board[x][y-1] == 1 and board[x - 1][y -1] == 1:
            q.append([x-1, y, cnt + 1])
            print('up')
        # up + left
        if board[x-1][y] == 0 and board[x][y-1] == 1 and board[x - 1][y -1] == 0:
            q.append([x - 1, y - 1, cnt + 2])
            print('up+left')
        # up + right
        if board[x][y+1] == 1 and board[x-1][y] == 1 and board[x - 1][y + 1] == 0:
            q.append([x - 1, y - 1, cnt + 2])
            print('up+right')
        # left + down
        if board[x][y-1] == 0 and board[x+1][y-1] == 0 and board[x+1][y] == 1:
            q.append([x + 1, y - 1, cnt + 2])
            print('up+left')
        # right
        if board[x][y+1] == 0 and board[x+1][y] == 1 and board[x+1][y+1] == 1:
            q.append([x, y+1, cnt + 1])
            print('right')
        # right + down
        if board[x][y + 1] == 0 and board[x + 1][y] == 1 and board[x + 1][y + 1] == 0:
            q.append([x+1, y + 1, cnt + 2])
            print('right+down')
        # left
        if board[x][y-1] == 0 and board[x+1][y] == 1 and board[x-1][y] == 1:
            q.append([x, y - 1, cnt + 1])
            print('left')
        # right + down (duplicate condition; labelled left+down below)
        if board[x][y + 1] == 0 and board[x + 1][y] == 1 and board[x + 1][y + 1] == 0:
            q.append([x + 1, y + 1, cnt + 2])
            print('left+down')
#
#     print(q)
#     print('------------------------------')
| true |
21dadec4337a331547c68afed7d1c3f1cc307251 | Python | vkuznet/WMCore | /test/python/WMCore_t/Services_t/UUID_t.py | UTF-8 | 1,373 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/env python
from __future__ import print_function
from builtins import str
import unittest
import time
from WMCore.Services.UUIDLib import makeUUID
class UUIDTest(unittest.TestCase):
    """Sanity checks for WMCore.Services.UUIDLib.makeUUID."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testUUID(self):
        """New UUIDs must be strings, unique, and differ from a reference
        UUID in their second and fifth '-'-separated components."""
        seenIDs = set()
        splitID = makeUUID().split('-')
        for _ in range(0, 1000):
            tmpID = makeUUID()
            tmpSplit = tmpID.split('-')
            self.assertNotEqual(tmpSplit[1], splitID[1], "Second component of UUID the same %s != %s"
                                % (tmpSplit[1], splitID[1]))
            self.assertNotEqual(tmpSplit[4], splitID[4], "Fourth component of UUID the same %s != %s"
                                % (tmpSplit[4], splitID[4]))
            self.assertEqual(type(tmpID), str)
            # Set membership is O(1); the original used list.count(), which
            # made the whole loop quadratic.
            self.assertNotIn(tmpID, seenIDs, "UUID repeated! %s already seen!" % tmpID)
            seenIDs.add(tmpID)
        return

    def testTime(self):
        """Rough throughput benchmark; prints the rate, asserts nothing."""
        nUIDs = 100000
        startTime = time.time()
        for _ in range(0, nUIDs):
            makeUUID()
        print("We can make %i UUIDs in %f seconds" % (nUIDs, time.time() - startTime))


if __name__ == '__main__':
    unittest.main()
| true |
8f4a40ea36305c514e395ac2ae8cd3eba469aff5 | Python | RobotNo42/old_coed | /project/python_fullstack/day21/test1.py | UTF-8 | 314 | 3.3125 | 3 | [] | no_license | from threading import Thread
import time
class MyThread(Thread):
    """Worker thread that announces its number, then idles for 3 seconds."""

    def __init__(self, num):
        super().__init__()
        self.num = num  # payload reported by run()

    def run(self):
        print("running on number:%s" % self.num)
        time.sleep(3)
# Demo: start two workers concurrently; "ending" prints from the main
# thread while both workers are still inside their 3-second sleep.
t1 = MyThread(56)
t2 = MyThread(78)
t1.start()
t2.start()
print("ending")
ca66630affc3f93b5316fce769d5ea02b4b211c2 | Python | sammypudjianto/PythonLib | /Games/Platformer/asset_loader.py | UTF-8 | 2,120 | 2.796875 | 3 | [
"MIT"
] | permissive | import os
import re
import pygame as p
class AssetLoader():
    """
    Singleton that scans the Assets folder once and loads sprite images
    and sounds on demand via pygame.
    """
    ASSETSPATH = './Assets/'
    PNGEXT = '.png'
    # Shared singleton instance (created lazily in __new__).
    _instance = None
    def __new__(cls):
        # Classic singleton: list the asset directory only on first use;
        # raise immediately if the folder is missing.
        if cls._instance is None:
            cls._instance = super(AssetLoader, cls).__new__(cls)
            if os.path.exists(AssetLoader.ASSETSPATH):
                cls._instance.files = os.listdir(AssetLoader.ASSETSPATH)
            else:
                raise OSError('unable to find ' + AssetLoader.ASSETSPATH)
        return cls._instance
    def __load_sprites(self, file_filter):
        """Load every asset whose name matches the regex *file_filter*,
        sorted by the first number embedded in the filename (animation
        frame order).  Exits the program if an image fails to load."""
        player_sprites = []
        regex = re.compile(file_filter)
        filtered_files = list(filter(regex.search, self.files))
        if len(filtered_files) > 1:
            number_regex = re.compile(r'(\d+)')
            filtered_files = sorted(
                filtered_files,
                key=lambda x: int(number_regex.search(x)[0])
            )
        for file in filtered_files:
            file_relpath = self.ASSETSPATH + file
            try:
                player_sprites.append(p.image.load(file_relpath))
            except p.error as message:
                print('cannot load image:', file_relpath)
                raise SystemExit(message)
        return player_sprites
    def load_walk_left_sprites(self):
        # Player frames L0.png .. L9.png
        return self.__load_sprites('L[0-9].png')
    def load_walk_right_sprites(self):
        return self.__load_sprites('R[0-9].png')
    def load_background(self):
        # Single background image; pop() unwraps the one-element list.
        return self.__load_sprites('bg.jpg').pop()
    def load_character(self):
        return self.__load_sprites('standing.png')
    def load_enemy_walk_left_sprites(self):
        # Enemy frames allow two-digit indices, e.g. L10E.png
        return self.__load_sprites('L[0-9]{1,2}E.png')
    def load_enemy_walk_right_sprites(self):
        return self.__load_sprites('R[0-9]{1,2}E.png')
    def load_bullet_sound(self):
        return p.mixer.Sound(self.ASSETSPATH + 'bullet.mp3')
    def load_hit_sound(self):
        return p.mixer.Sound(self.ASSETSPATH + 'hit.mp3')
    def load_music(self):
        return p.mixer.Sound(self.ASSETSPATH + 'music.mp3')
| true |
76a173e73b9bc71f4e0dcf0c1442ec5ea535936d | Python | wxqhphy/udacity | /find_lane_lines/color_selection.py | UTF-8 | 1,582 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 20 09:56:33 2019
@author: wxq
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Lane-line selection demo: keep bright pixels inside a triangular region
# of interest and paint them red on a copy of the input image.
image = mpimg.imread('test.jpg')
print('This image is:',type(image), 'with dimensions:', image.shape)
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
line_image = np.copy(image)
# A pixel counts as "lane-colored" only if all three channels are >= 200.
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold,green_threshold,blue_threshold]
# Triangle region of interest: two bottom corners and an apex near the
# center of the frame.
left_bottom = [100,720]
right_bottom = [1100,720]
apex = [650,430]
# Fit y = m*x + b for each edge of the triangle.
fit_left =np.polyfit((left_bottom[0],apex[0]),(left_bottom[1],apex[1]),1)
fit_right =np.polyfit((right_bottom[0],apex[0]),(right_bottom[1],apex[1]),1)
fit_bottom =np.polyfit((left_bottom[0],right_bottom[0]),(left_bottom[1],right_bottom[1]),1)
# True where at least one channel is below its threshold (i.e. too dark).
color_thresholds = (image[:,:,0]<rgb_threshold[0])\
                    |(image[:,:,1]<rgb_threshold[1])\
                    |(image[:,:,2]<rgb_threshold[2])
XX,YY = np.meshgrid(np.arange(0,xsize),np.arange(0,ysize))
# True for pixels inside the triangle (below both sloped edges, above the
# bottom edge; image y grows downward).
region_thresholds = (YY>(XX*fit_left[0]+fit_left[1])) & \
                    (YY>(XX*fit_right[0]+fit_right[1])) & \
                    (YY<(XX*fit_bottom[0]+fit_bottom[1]))
# Black out everything that is dark or outside the region...
color_select[color_thresholds | ~region_thresholds] = [0,0,0]
# ...and mark bright in-region pixels red on the line image.
line_image[~color_thresholds & region_thresholds] = [255,0,0]
plt.imshow(image)
# Outline the region of interest with a dashed blue triangle.
x = [left_bottom[0],right_bottom[0],apex[0],left_bottom[0]]
y = [left_bottom[1],right_bottom[1],apex[1],left_bottom[1]]
plt.plot(x,y,'b--',lw=1)
#plt.imshow(color_select)
plt.imshow(line_image)
plt.show()
mpimg.imsave('test_after.png',line_image)
class Primera:
    """First base class of the multiple-inheritance demo."""

    def __init__(self):
        print("Yo soy la primera clase")

    def primera(self):
        print("Este es el método heredado de Primera")
class Segunda:
    """Second base class of the multiple-inheritance demo."""

    def __init__(self):
        print("Yo soy la segunda clase")

    def segunda(self):
        print("Este es el método heredado de Segunda")
class Tercera(Primera, Segunda):
    """Combines both bases; the MRO is Tercera -> Primera -> Segunda."""

    def tercera(self):
        print("Este es el método heredado de Tercera")
# Instantiate the combined class; only Primera.__init__ runs (first in the
# MRO, and it does not call super()), then each inherited method is used.
herencia_multiple = Tercera()
herencia_multiple.primera()
herencia_multiple.segunda()
herencia_multiple.tercera()
9ae888815014da16952c953dbcb8f391d0e05ff8 | Python | vishalbelsare/torchsde | /examples/cont_ddpm.py | UTF-8 | 11,421 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A min example for continuous-time Denoising Diffusion Probabilistic Models.
Trains the backward dynamics to be close to the reverse of a fixed forward
dynamics via a score-matching-type objective.
Trains a simple model on MNIST and samples from both the reverse ODE and
SDE formulation.
To run this file, first run the following to install extra requirements:
pip install kornia
pip install einops
pip install torchdiffeq
pip install fire
To run, execute:
python -m examples.cont_ddpm
"""
import abc
import logging
import math
import os
import fire
import torch
import torchdiffeq
import torchvision as tv
import tqdm
from torch import nn, optim
from torch.utils import data
import torchsde
from . import unet
def fill_tail_dims(y: torch.Tensor, y_like: torch.Tensor):
    """Right-pad y with singleton dims until it has as many dims as y_like."""
    missing = y_like.dim() - y.dim()
    return y[(...,) + (None,) * missing]
class Module(abc.ABC, nn.Module):
    """Small convenience base on top of ``nn.Module``.

    Adds fast gradient clearing and a ``device`` property inferred from
    the first parameter.
    """

    def __init__(self):
        super(Module, self).__init__()
        self._checkpoint = False

    def zero_grad(self) -> None:
        # Setting .grad to None is cheaper than zeroing the tensors.
        for p in self.parameters():
            p.grad = None

    @property
    def device(self):
        return next(self.parameters()).device
class ScoreMatchingSDE(Module):
    """Wraps score network with analytical sampling and cond. score computation.
    The variance preserving formulation in
    Score-Based Generative Modeling through Stochastic Differential Equations
    https://arxiv.org/abs/2011.13456
    """
    def __init__(self, denoiser, input_size=(1, 28, 28), t0=0., t1=1., beta_min=.1, beta_max=20.):
        # denoiser: score network called as denoiser(t, y); beta(t) ramps
        # linearly from beta_min at t=0 to beta_max at t=1.
        super(ScoreMatchingSDE, self).__init__()
        if t0 > t1:
            raise ValueError(f"Expected t0 <= t1, but found t0={t0:.4f}, t1={t1:.4f}")
        self.input_size = input_size
        self.denoiser = denoiser
        self.t0 = t0
        self.t1 = t1
        self.beta_min = beta_min
        self.beta_max = beta_max
    def score(self, t, y):
        # Normalize t to a per-sample 1-D tensor before calling the network.
        if isinstance(t, float):
            t = y.new_tensor(t)
        if t.dim() == 0:
            t = t.repeat(y.shape[0])
        return self.denoiser(t, y)
    def _beta(self, t):
        # Linear noise schedule beta(t).
        return self.beta_min + t * (self.beta_max - self.beta_min)
    def _indefinite_int(self, t):
        """Indefinite integral of beta(t)."""
        return self.beta_min * t + .5 * t ** 2 * (self.beta_max - self.beta_min)
    def analytical_mean(self, t, x_t0):
        # Closed-form mean of x_t given x_{t0}: exp(-0.5 * int beta) * x_t0.
        mean_coeff = (-.5 * (self._indefinite_int(t) - self._indefinite_int(self.t0))).exp()
        mean = x_t0 * fill_tail_dims(mean_coeff, x_t0)
        return mean
    def analytical_var(self, t, x_t0):
        # Closed-form variance; note x_t0 is unused (variance is state-free).
        analytical_var = 1 - (-self._indefinite_int(t) + self._indefinite_int(self.t0)).exp()
        return analytical_var
    @torch.no_grad()
    def analytical_sample(self, t, x_t0):
        # Sample x_t ~ N(mean, var) directly from the closed-form marginal.
        mean = self.analytical_mean(t, x_t0)
        var = self.analytical_var(t, x_t0)
        return mean + torch.randn_like(mean) * fill_tail_dims(var.sqrt(), mean)
    @torch.no_grad()
    def analytical_score(self, x_t, t, x_t0):
        # Conditional score d log p(x_t | x_t0); variance clamped for stability.
        mean = self.analytical_mean(t, x_t0)
        var = self.analytical_var(t, x_t0)
        return - (x_t - mean) / fill_tail_dims(var, mean).clamp_min(1e-5)
    def f(self, t, y):
        # Drift of the forward (variance preserving) SDE.
        return -0.5 * self._beta(t) * y
    def g(self, t, y):
        # Diagonal diffusion, broadcast to y's shape.
        return fill_tail_dims(self._beta(t).sqrt(), y).expand_as(y)
    def sample_t1_marginal(self, batch_size, tau=1.):
        # Approximate t1 marginal: standard normal scaled by temperature tau.
        return torch.randn(size=(batch_size, *self.input_size), device=self.device) * math.sqrt(tau)
    def lambda_t(self, t):
        # Per-time weighting for the score-matching loss.
        return self.analytical_var(t, None)
    def forward(self, x_t0, partitions=1):
        """Compute the score matching objective.
        Split [t0, t1] into partitions; sample uniformly on each partition to reduce gradient variance.
        """
        u = torch.rand(size=(x_t0.shape[0], partitions), dtype=x_t0.dtype, device=x_t0.device)
        u.mul_((self.t1 - self.t0) / partitions)
        shifts = torch.arange(0, partitions, device=x_t0.device, dtype=x_t0.dtype)[None, :]
        shifts.mul_((self.t1 - self.t0) / partitions).add_(self.t0)
        t = (u + shifts).reshape(-1)
        lambda_t = self.lambda_t(t)
        # Each input is repeated once per partition so every stratified t
        # gets its own noisy sample.
        x_t0 = x_t0.repeat_interleave(partitions, dim=0)
        x_t = self.analytical_sample(t, x_t0)
        fake_score = self.score(t, x_t)
        true_score = self.analytical_score(x_t, t, x_t0)
        loss = (lambda_t * ((fake_score - true_score) ** 2).flatten(start_dim=1).sum(dim=1))
        return loss
class ReverseDiffeqWrapper(Module):
    """Wrapper of the score network for odeint/sdeint.
    We split this module out, so that `forward` of the score network is solely
    used for computing the score, and the `forward` here is used for odeint.
    Helps with data parallel.

    Time is negated throughout so that the solvers, which integrate
    forward in their own time variable, effectively run the dynamics in
    reverse from t1 down to t0.
    """
    noise_type = "diagonal"
    sde_type = "stratonovich"
    def __init__(self, module: ScoreMatchingSDE):
        super(ReverseDiffeqWrapper, self).__init__()
        self.module = module
    # --- odeint ---
    def forward(self, t, y):
        # Probability-flow ODE drift of the reverse process.
        return -(self.module.f(-t, y) - .5 * self.module.g(-t, y) ** 2 * self.module.score(-t, y))
    # --- sdeint ---
    def f(self, t, y):
        # sdeint works on flattened states; reshape to images for the model.
        y = y.view(-1, *self.module.input_size)
        out = -(self.module.f(-t, y) - self.module.g(-t, y) ** 2 * self.module.score(-t, y))
        return out.flatten(start_dim=1)
    def g(self, t, y):
        y = y.view(-1, *self.module.input_size)
        out = -self.module.g(-t, y)
        return out.flatten(start_dim=1)
    # --- sample ---
    def sample_t1_marginal(self, batch_size, tau=1.):
        return self.module.sample_t1_marginal(batch_size, tau)
    @torch.no_grad()
    def ode_sample(self, batch_size=64, tau=1., t=None, y=None, dt=1e-2):
        # Integrate the probability-flow ODE with fixed-step RK4.
        self.module.eval()
        t = torch.tensor([-self.t1, -self.t0], device=self.device) if t is None else t
        y = self.sample_t1_marginal(batch_size, tau) if y is None else y
        return torchdiffeq.odeint(self, y, t, method="rk4", options={"step_size": dt})
    @torch.no_grad()
    def ode_sample_final(self, batch_size=64, tau=1., t=None, y=None, dt=1e-2):
        # Convenience: only the state at the final time.
        return self.ode_sample(batch_size, tau, t, y, dt)[-1]
    @torch.no_grad()
    def sde_sample(self, batch_size=64, tau=1., t=None, y=None, dt=1e-2, tweedie_correction=True):
        self.module.eval()
        t = torch.tensor([-self.t1, -self.t0], device=self.device) if t is None else t
        y = self.sample_t1_marginal(batch_size, tau) if y is None else y
        ys = torchsde.sdeint(self, y.flatten(start_dim=1), t, dt=dt)
        ys = ys.view(len(t), *y.size())
        if tweedie_correction:
            ys[-1] = self.tweedie_correction(self.t0, ys[-1], dt)
        return ys
    @torch.no_grad()
    def sde_sample_final(self, batch_size=64, tau=1., t=None, y=None, dt=1e-2):
        return self.sde_sample(batch_size, tau, t, y, dt)[-1]
    def tweedie_correction(self, t, y, dt):
        # One score-based denoising step applied to the final SDE sample.
        return y + dt ** 2 * self.module.score(t, y)
    @property
    def t0(self):
        return self.module.t0
    @property
    def t1(self):
        return self.module.t1
def preprocess(x, logit_transform, alpha=0.95):
    """Map images from [0, 1] into model space.

    With ``logit_transform`` the pixels are first squashed by the affine
    map ``alpha + (1 - 2*alpha) * x`` (keeping them away from 0 and 1)
    and then sent through the logit; otherwise they are linearly rescaled
    to [-1, 1].  Inverse of ``postprocess``.
    """
    if not logit_transform:
        return (x - 0.5) * 2
    squashed = alpha + (1 - 2 * alpha) * x
    return (squashed / (1 - squashed)).log()
def postprocess(x, logit_transform, alpha=0.95, clamp=True):
    """Inverse of ``preprocess``: map model-space values back toward [0, 1].

    With ``clamp`` (default) the result is clipped into [0, 1].
    """
    if logit_transform:
        out = (x.sigmoid() - alpha) / (1 - 2 * alpha)
    else:
        out = 0.5 * x + 0.5
    if clamp:
        out = out.clamp(min=0., max=1.)
    return out
def make_loader(
        root="./data/mnist",
        train_batch_size=128,
        shuffle=True,
        pin_memory=True,
        num_workers=0,
        drop_last=True
):
    """Make a simple loader for training images in MNIST."""
    def dequantize(x, nvals=256):
        """[0, 1] -> [0, nvals] -> add uniform noise -> [0, 1]"""
        # Uniform dequantization turns discrete 8-bit pixel levels into a
        # continuous distribution on [0, 1].
        noise = x.new().resize_as_(x).uniform_()
        x = x * (nvals - 1) + noise
        x = x / nvals
        return x
    # ToTensor scales pixels to [0, 1]; dequantize then adds the noise.
    train_transform = tv.transforms.Compose([tv.transforms.ToTensor(), dequantize])
    # MNIST is downloaded into `root` on first use.
    train_data = tv.datasets.MNIST(root, train=True, transform=train_transform, download=True)
    train_loader = data.DataLoader(
        train_data,
        batch_size=train_batch_size,
        drop_last=drop_last,
        shuffle=shuffle,
        pin_memory=pin_memory,
        num_workers=num_workers
    )
    return train_loader
def main(
        train_dir="./dump/cont_ddpm/",
        epochs=100,
        lr=1e-4,
        batch_size=128,
        pause_every=1000,
        tau=1.,
        logit_transform=True,
):
    """Train and sample once in a while.
    Args:
        train_dir: Path to a folder to dump things.
        epochs: Number of training epochs.
        lr: Learning rate for Adam.
        batch_size: Batch size for training.
        pause_every: Log and write figures once in this many iterations.
        tau: The temperature for sampling.
        logit_transform: Applies the typical logit transformation if True.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Data.
    train_loader = make_loader(root=os.path.join(train_dir, 'data'), train_batch_size=batch_size)
    # Model + optimizer.
    denoiser = unet.Unet(
        input_size=(1, 28, 28),
        dim_mults=(1, 2, 4,),
        attention_cls=unet.LinearTimeSelfAttention,
    )
    forward = ScoreMatchingSDE(denoiser=denoiser).to(device)
    reverse = ReverseDiffeqWrapper(forward)
    optimizer = optim.Adam(params=forward.parameters(), lr=lr)
    def plot(imgs, path):
        # Map model-space samples back to [0, 1] and save an image grid.
        assert not torch.any(torch.isnan(imgs)), "Found nans in images"
        os.makedirs(os.path.dirname(path), exist_ok=True)
        imgs = postprocess(imgs, logit_transform=logit_transform).detach().cpu()
        tv.utils.save_image(imgs, path)
    global_step = 0
    for epoch in range(epochs):
        for x, _ in tqdm.tqdm(train_loader):
            forward.train()
            forward.zero_grad()
            x = preprocess(x.to(device), logit_transform=logit_transform)
            # forward(x) returns a per-sample score-matching loss vector.
            loss = forward(x).mean(dim=0)
            loss.backward()
            optimizer.step()
            global_step += 1
            if global_step % pause_every == 0:
                # Periodically log the loss and dump ODE / SDE samples.
                logging.warning(f'global_step: {global_step:06d}, loss: {loss:.4f}')
                img_path = os.path.join(train_dir, 'ode_samples', f'global_step_{global_step:07d}.png')
                ode_samples = reverse.ode_sample_final(tau=tau)
                plot(ode_samples, img_path)
                img_path = os.path.join(train_dir, 'sde_samples', f'global_step_{global_step:07d}.png')
                sde_samples = reverse.sde_sample_final(tau=tau)
                plot(sde_samples, img_path)
if __name__ == "__main__":
    fire.Fire(main)
| true |
dcfa3c162a5f243d67a42ccdb36736fcbc043020 | Python | mrgrit/Tensorflow | /tensorboard_test.py | UTF-8 | 1,472 | 2.71875 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
# Logistic-regression AND gate in TensorFlow 1.x, instrumented for
# TensorBoard (name scopes + scalar summaries under ./logs/and_01).
x = [[0,0], [0,1], [1,0], [1,1]]
y = [[0], [0], [0], [1]]
learning_rate = 0.01
X = tf.placeholder(tf.float32, [None, 2], name="X-input")
Y = tf.placeholder(tf.float32, [None, 1], name="Y-input")
with tf.name_scope("Layer") as scope:
    W = tf.Variable(tf.random_uniform([2, 1], -1.0, 1.0), name='weight')
    B = tf.Variable(tf.zeros([1]), name='bias')
    L = tf.sigmoid(tf.matmul(X, W) + B)
with tf.name_scope("Cost") as scope:
    # Binary cross-entropy.
    cost = -tf.reduce_mean(Y * tf.log(L)+ (1-Y)*tf.log(1-L))
    cost_sum = tf.summary.scalar("Cost", cost)
with tf.name_scope("Train") as scope:
    train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
with tf.name_scope("Accuracy") as scope:
    # Threshold the sigmoid output at 0.5 to get 0/1 predictions.
    predicted = tf.cast(L > 0.5, dtype=tf.float32)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
    accuracy_sum = tf.summary.scalar("Accuracy", accuracy)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("./logs/and_01")
    writer.add_graph(sess.graph)
    sess.run(init)
    for step in range(100):
        # One full-batch training step; summaries recorded every step.
        summary, _ = sess.run([merged, train], feed_dict={X: x, Y: y})
        writer.add_summary(summary, step)
        if step % 10 == 0:
            print(step, sess.run(cost, feed_dict={X: x, Y: y}), sess.run([W]))
    print(sess.run(accuracy, feed_dict={X: x, Y: y}))
| true |
# Total of the sample numbers, computed with the built-in sum()
# (replaces the manual accumulator loop).
nums = [8, 60, 43, 55, 25, 134, 1]
x = sum(nums)
print(x)
27a3d52ee6eb0382840650734d38592f2fb216a9 | Python | zcding001/stroll | /script/utils/file_util.py | UTF-8 | 6,034 | 2.921875 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
# author : zc.ding@foxmail.com
# desc : 文件操作工具类
import re
import os
import logging
import shutil
# Module-wide logging: DEBUG level with timestamp, file name and line number.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
def create_file(file_path, content=""):
    """Create (or overwrite) a UTF-8 text file.

    The file is only written when the path does not exist yet or already
    refers to a regular file; an existing directory is left untouched.

    :param file_path: path of the file to create
    :param content: text to write into the file
    :return: None
    """
    path = os.path.abspath(file_path)
    if not os.path.exists(path) or os.path.isfile(path):
        # "with" guarantees the handle is closed even if write() fails.
        with open(path, 'w', encoding="UTF-8") as file:
            file.write(content)
def append_file(file_path, content=""):
    """Append text to a UTF-8 file, creating it if missing.

    Nothing is written when the path exists and is a directory.

    :param file_path: path of the file to append to
    :param content: text to append
    :return: None
    """
    path = os.path.abspath(file_path)
    if not os.path.exists(path) or os.path.isfile(path):
        # "with" guarantees the handle is closed even if write() fails.
        with open(path, 'a', encoding="UTF-8") as file:
            file.write(content)
def read_file(file_path):
    """Read a UTF-8 text file and return its full content.

    :param file_path: path of the file to read
    :return: the whole file content as one string
    """
    logging.debug("read file file_path: " + file_path)
    # read() returns everything at once; the original concatenated line by
    # line (quadratic) and closed the handle manually.
    with open(file_path, 'r', encoding="UTF-8") as file:
        return file.read()
def replace(file_path, old_params, new_params):
    """Replace regex patterns in a file, in place.

    Each pattern in *old_params* is substituted with the value at the same
    index in *new_params*; patterns are Python regular expressions.

    :param file_path: path of the file to rewrite
    :param old_params: regex patterns to replace
    :param new_params: replacement strings (parallel to old_params)
    :return: None
    """
    logging.info("file file_path: " + file_path + "; old_params: " + ", ".join(old_params) + "; new_params: " + ", ".join(new_params))
    if len(old_params) <= 0 or len(new_params) <= 0:
        logging.error("can't find replace params.")
        return
    content = ""
    with open(file_path, 'r', encoding="UTF-8") as file:
        for line in file:
            # zip pairs each pattern with its replacement; the original's
            # old_params.index(param) lookup was O(n) per hit and picked
            # the wrong replacement for duplicate patterns.
            for param, new_value in zip(old_params, new_params):
                if re.search(param, line):
                    line = re.sub(param, new_value, line)
            content += line
    with open(file_path, 'w', encoding="UTF-8") as write_file:
        write_file.write(content)
def list_files(file_path, root_name="", child=True):
    """
    List files under a path.

    :param file_path: root path to scan
    :param root_name: basename of a sub-directory to restrict results to
    :param child: whether files of nested sub-directories are included
    :return: list of absolute file paths
    """
    file_lists = []
    result = []
    prefix_list = []
    # Collect every file under file_path, remembering directories whose
    # basename matches root_name.
    for root, dirs, files in os.walk(file_path):
        if len(root_name) > 0 and os.path.basename(root) == root_name:
            prefix_list.append(root)
        for f in files:
            file_lists.append(root + os.path.sep + f)
    # Keep only files located below one of the root_name directories.
    if len(prefix_list) > 0:
        for p in prefix_list:
            for f in file_lists:
                # if not child and f.replace(p, "").count(os.path.sep) <= 0:
                if f.startswith(p):
                    result.append(f)
        file_lists = result
        result = []
    # Drop files from nested sub-directories when child is False.
    if not child:
        __tmp = file_path
        if len(prefix_list) > 0:
            # NOTE(review): pop() keeps only the last matched prefix, so
            # with several root_name matches the depth filter uses one base.
            __tmp = prefix_list.pop()
        for f in file_lists:
            if f.replace(__tmp, "").count(os.path.sep) <= 1:
                result.append(f)
        file_lists = result
    logging.debug(file_lists)
    return file_lists
def get_parent_path(path):
    """Return the absolute path of *path*'s parent directory.

    :param path: any path, relative or absolute
    :return: absolute parent path
    """
    absolute = os.path.abspath(path)
    return os.path.abspath(os.path.join(absolute, os.path.pardir))
def make_dirs(file_path):
    """Create directory *file_path* (including parents) if missing.

    :param file_path: directory path to create
    :return: None
    """
    # exist_ok avoids the check-then-create race of the original
    # exists()/isdir() test; an existing regular file still raises.
    os.makedirs(file_path, exist_ok=True)
def copy_file(src_file_path, dst_path, only_parent=False):
    """Copy one file, creating the destination directories first.

    :param src_file_path: source file path
    :param dst_path: destination path
    :param only_parent: create only up to dst_path's parent directory
    :return: None
    """
    logging.info("copy file from [%s] to [%s]", src_file_path, dst_path)
    target_dir = get_parent_path(dst_path) if only_parent else dst_path
    make_dirs(target_dir)
    shutil.copy(src_file_path, dst_path)
def copy_path(src_path, dst_path, remove=False):
    """Copy everything under *src_path* into *dst_path*.

    :param src_path: source directory
    :param dst_path: destination directory
    :param remove: delete *dst_path* first when it already exists
    :return: None
    """
    logging.info("copy file from [%s] to [%s]", src_path, dst_path)
    if remove and os.path.exists(dst_path) and os.path.isdir(dst_path):
        shutil.rmtree(dst_path)
    # shutil.copytree(src_path, dst_path)
    for root, dirs, files in os.walk(src_path):
        for name in files:
            source_file = root + os.path.sep + name
            logging.info("文件路径: %s", source_file)
            # Re-root the file's relative location under dst_path.
            destination_file = dst_path + source_file.replace(src_path, "")
            copy_file(source_file, destination_file, only_parent=True)
def del_path(path, *file_name):
    """
    Delete a directory tree, or only the named files inside it.

    :param path: directory to operate on
    :param file_name: specific file names to delete; when omitted the
        whole tree is removed
    :return: None
    """
    logging.warning("del path: " + path + ", special file list is: ")
    logging.warning(file_name)
    path = os.path.abspath(path)
    # Hard safety guard: never delete these deployment-critical paths.
    if path == "/" or path == "/home/develop/.jenkins" or path == "/data/www/projects":
        logging.info("path /, /data/www/projects, /home/develop/.jenkins is not allowed to be deleted.")
        return
    if os.path.exists(path) and os.path.isdir(path):
        if len(file_name) > 0:
            # Walk the tree and remove only files whose name matches.
            for root, dirs, files in os.walk(path):
                for a in files:
                    for b in file_name:
                        if a == b:
                            os.remove(os.path.abspath(os.path.join(root, a)))
        else:
            shutil.rmtree(path)
| true |
def largest_34(a):
    '''
    (List) -> int
    Returns the sum of the 3rd and 4th largest values in the list a.
    '''
    # sorted() works on a copy; the original's in-place a.sort() mutated
    # the caller's list as a side effect.
    ordered = sorted(a, reverse=True)
    return sum(ordered[2:4])
def largest_third(a):
    '''
    (List) -> int
    Computes the sum of the len(a)//3 largest values in the list a.
    '''
    # Sort a copy instead of mutating the caller's list.
    ordered = sorted(a, reverse=True)
    return sum(ordered[:len(a) // 3])
def third_at_least(a):
    '''
    (List) -> int
    Returns a value in a that occurs at least len(a)//3 + 1 times, or
    None when no such element exists.  When several qualify, the
    smallest one is returned.
    '''
    # Sorting a copy groups equal values together without mutating a;
    # if ordered[i] == ordered[i + c] the value fills c+1 consecutive
    # slots, i.e. occurs at least c+1 times.
    ordered = sorted(a)
    c = len(a) // 3
    for i in range(len(a) - c):
        if ordered[i] == ordered[i + c]:
            return ordered[i]
    return None
def sum_tri(a, x):
    '''
    (List, int) -> bool
    Returns True if there exist indices i, j and k (not necessarily
    distinct) such that a[i]+a[j]+a[k] == x, otherwise False.
    '''
    # Check x - a[i] - a[j] against a set of values: O(n^2) instead of
    # the original triple loop (whose leading sorted(set(a)) was dead
    # code - its result was discarded).
    values = set(a)
    for first in a:
        for second in a:
            if x - first - second in values:
                return True
    return False
| true |
df4c4ce3939918c17b2de93d6d4e9572af3fd226 | Python | komuro-zero/get-trade-data | /bitflyer_csv.py | UTF-8 | 3,789 | 2.515625 | 3 | [] | no_license | from __future__ import unicode_literals, print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pybitflyer
import time
import pytz
from quoine.client import Quoinex
from datetime import datetime, timezone, timedelta
import bitmex
import csv
import os
class bitflyer_BTCJPY():
    """Pulls bitFlyer trade executions and appends them to daily CSV files."""

    def convert_time(self,time):
        # Turn bitFlyer's ISO-like "YYYY-MM-DDTHH:MM:SS..." timestamp into a
        # naive datetime shifted +9h (JST).
        new_time = time[:10]+ " " + time[11:]
        altered_time = datetime.strptime(new_time[:19],'%Y-%m-%d %H:%M:%S') + timedelta(hours = 9)
        return altered_time
    def bitflyer_quantify_executions(self,executions):
        # Split a list of execution dicts into parallel price / datetime lists.
        price = []
        date = []
        for execution in executions:
            this_time = self.convert_time(execution["exec_date"])
            price.append(execution["price"])
            date.append(this_time)
        return price, date
    def run(self,now,yesterday,bitflyer_sleep_time,product_codes,before_id = None):
        # Page backwards through executions (500 at a time, keyed by the
        # oldest id of the previous page) and append rows to a per-day CSV
        # until records older than `yesterday` are reached.
        bitflyer_api = pybitflyer.API()
        yesterday = yesterday.replace(tzinfo=None)
        now = now.replace(tzinfo=None)
        #first get the execution id for the most recent transaction made
        price = []
        date = []
        count =0
        #based on the timestamp, get the transaction for last 500 transactions. continue to do so with the last transaction id for each iteration until you reach the next day.
        flag = True
        while flag:
            if not before_id:
                executions = bitflyer_api.executions(product_code = product_codes, count = 500)
            else:
                executions = bitflyer_api.executions(product_code = product_codes,before = before_id, count = 500)
            before_id = executions[-1]["id"]
            price, date = self.bitflyer_quantify_executions(executions)
            if date[0] < now:
                csv_data = []
                for i in range(len(price)):
                    csv_data.append([date[i],price[i]])
                last_day = date[0]
                if yesterday > last_day:
                    flag = False
                else:
                    os.makedirs("./csv_files/", exist_ok=True)
                    with open(f"./csv_files/bitflyer_{product_codes}_{str(yesterday)[:4]+str(yesterday)[5:7]+str(yesterday)[8:10]}.csv","a") as f:
                        writer = csv.writer(f,lineterminator='\n')
                        for data in csv_data:
                            writer.writerow(data)
                    print(f"bitflyer, date: {date[0]} id: {before_id}")
                    time.sleep(bitflyer_sleep_time)
                    print(date[0],now)
            count +=1
    def test_run(self,now,yesterday):
        # Debug helper: streams executions 3 at a time and prints them.
        # NOTE(review): this loop never sets flag = False, so it runs until
        # interrupted - presumably intentional for manual inspection.
        product_codes = "FX_BTC_JPY"
        bitflyer_api = pybitflyer.API()
        yesterday = yesterday.replace(tzinfo=None)
        now = now.replace(tzinfo=None)
        before_id= None
        #first get the execution id for the most recent transaction made
        price = []
        date = []
        count =0
        #based on the timestamp, get the transaction for last 500 transactions. continue to do so with the last transaction id for each iteration until you reach the next day.
        flag = True
        while flag:
            if not before_id:
                executions = bitflyer_api.executions(product_code = product_codes, count = 3)
            else:
                print(before_id)
                executions = bitflyer_api.executions(product_code = product_codes,after = before_id, count = 3)
            before_id = executions[0]["id"]
            time.sleep(2)
            for row in executions:
                print(before_id,row)
            count +=1
if __name__ == "__main__":
    # Manual smoke test: stream the most recent FX_BTC_JPY executions.
    bitflyer = bitflyer_BTCJPY()
    now = datetime.now()
    yesterday = now -timedelta(days = 1)
    bitflyer.test_run(now,yesterday)
90285c623c739740f06e4ccd160df9946cee80d9 | Python | rapid7/insightconnect-plugins | /plugins/ipinfo/icon_ipinfo/actions/ip_lookup/action.py | UTF-8 | 904 | 2.640625 | 3 | [
"MIT"
] | permissive | import komand
from .schema import IpLookupInput, IpLookupOutput, Input
# Custom imports below
import requests
class IpLookup(komand.Action):
    """Action that looks up information about an IP address via ipinfo."""

    def __init__(self):
        # Name the class explicitly in super(): the original
        # super(self.__class__, self) recurses forever as soon as this
        # action is subclassed.
        super(IpLookup, self).__init__(
            name="ip_lookup",
            description="Lookup IP Address Information",
            input=IpLookupInput(),
            output=IpLookupOutput(),
        )

    def run(self, params={}):
        """Query the ipinfo service for the requested IP and return its JSON.

        ``params={}`` is kept for interface compatibility; it is only
        read (params.get), never mutated.
        """
        # Set variables
        ip = params.get(Input.IP)
        token = self.connection.token
        server = self.connection.domain
        # Check if token is provided and set correct URL
        if token:
            url = f"{server}{ip}/json?token={token}"
            self.logger.info("API token was provided by user")
        else:
            url = f"{server}{ip}/json"
        # Make request
        request = requests.get(url)
        return request.json()
| true |
7fda72a52ba2ffcc768805ac315c626caa420a05 | Python | wdczz/APF_Swarm_Control_Simulator | /python_code/Quadrotor/Bird.py | UTF-8 | 510 | 2.921875 | 3 | [
"MIT"
] | permissive | import numpy as np
import sys
sys.path.append('../')
from Obstacle import Obstacle
class Bird(object):
    """A static point hazard that registers itself as a swarm obstacle."""

    def __init__(self, initialPosition):
        # Keep the position as a numpy vector for easy indexing.
        self.position = np.array(initialPosition)

    def getBodyPosition(self):
        # The "body" is the same point twice (a degenerate segment),
        # matching the shape other bodies expose.
        return np.array([self.position, self.position], dtype="object")

    def connectToSwarmController(self, SwarmController):
        coords = (self.position[0], self.position[1], self.position[2])
        self.ObstacleAgent = Obstacle(coords)
        SwarmController.addObstacle(self.ObstacleAgent)
88f44530bbfa544a034906edb9f8e8126432448d | Python | kevin41307/Python | /Decorator/Decorator_and_Logging.py | UTF-8 | 846 | 3.390625 | 3 | [] | no_license | #!/usr/bin/python
'''
利用Decorator與Logging
紀錄程式執行經過
'''
import logging
import time
def Big(func):
def Mid(*args,**kwargs):
logger = logging.getLogger('decorator')
logger.setLevel(logging.INFO)
f_handle = logging.FileHandler("/tmp/test")
formatter = logging.Formatter('%(asctime)s %(name)s %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
f_handle.setFormatter(formatter)
logger.addHandler(f_handle)
print 'start of :'+func.__name__
logger.info('start of :'+func.__name__)
func(*args,**kwargs) #your function work here
logger.info(func.__name__+'('+ ' '.join(str(i) for i in args) +')')
print " end of :"+func.__name__
logger.info('end of :'+func.__name__)
return Mid
@Big
def add(x,y):
    # Demo target: prints the sum of its two arguments (Python 2 print).
    print"x+y=",x+y
add(4,6)
| true |
940cadfc6f61551f7a4f80c16c5f759a731698ad | Python | ameya-salankar/similarities | /helpers.py | UTF-8 | 1,824 | 3.46875 | 3 | [] | no_license | from nltk.tokenize import sent_tokenize
def lines(a, b):
    """Return a list of the lines that appear in both a and b.

    Lines are compared without a trailing newline.  (The previous
    character-by-character scanner kept the final '\\n' on the last line
    when the string ended with one, so identical lines could fail to
    match depending on where they sat in each string.)
    """
    return list(set(a.split('\n')) & set(b.split('\n')))
def sentences(a, b):
    """Return a list of the sentences that appear in both a and b.

    Sentences are produced by nltk's sent_tokenize; a set intersection
    replaces the previous O(n*m) pairwise comparison.
    """
    return list(set(sent_tokenize(a)) & set(sent_tokenize(b)))
def substrings(a, b, n):
    """Return the substrings of length n that appear in both a and b.

    Replaces the previous char-by-char window building (quadratic string
    concatenation) and O(n*m) pairwise intersection with slice
    comprehensions and a set intersection.
    """
    subs_a = {a[i:i + n] for i in range(len(a) - n + 1)}
    subs_b = {b[i:i + n] for i in range(len(b) - n + 1)}
    return list(subs_a & subs_b)
7d2837e90f1b11b316f751aa066d904e78bdb565 | Python | Holmes-pengge/asyncio_demo | /domain_test/domain_test_v1.0.2.py | UTF-8 | 1,803 | 2.640625 | 3 | [] | no_license | import json
import socket
import asyncio
from pythonping import ping
import time
"""
{
"domains": [{
"url": "www.baidu.com",
"isalvie": 0,
"finalurl": ""
}, {
"url": "www.sina.com",
"isalvie": 1,
"finalurl": "https://www.sina.com"
}]
}
"""
# def ping_domain(domain):
# resp = ping(domain, count=4, size=10)
# print(resp)
# if 'Round Trip Times min/avg/max is' in resp:
async def executor(host):
    """Resolve *host* and ping it; return a status record dict.

    Returns {"url", "isalvie", "finalurl"} -- the (sic) "isalvie" key is
    kept because the JSON output consumers rely on it.

    BUG FIX: a host that resolved but did not answer the ping previously
    fell through every branch and returned an empty dict; it is now
    reported as dead like an unresolvable host.
    """
    try:
        ip = socket.gethostbyname(host)
        resp = ping(ip, timeout=2, count=4, size=10, df=False)
        alive = resp.success()
        if alive:
            print(host)
    except Exception:
        # DNS failure or ping error: treat the host as dead.
        alive = False
    if alive:
        return {
            "url": host,
            "isalvie": 1,
            "finalurl": "https://{}".format(host),
        }
    return {
        "url": host,
        "isalvie": 0,
        "finalurl": "",
    }
async def handle_file(filename):
    """Probe every hostname listed in *filename* (one per line) and dump
    the results as JSON to domain_v2.json in the working directory.

    NOTE(review): executor() performs blocking DNS/ping calls, so hosts
    are effectively checked one at a time despite asyncio.
    """
    # read the domain list file
    result_ls = []
    with open(filename, 'r', encoding='gb18030', errors='ignore') as fp:
        for line in fp.readlines():
            host = line.strip()
            item = await executor(host)
            result_ls.append(item)
    print(result_ls)
    with open('domain_v2.json', 'w') as f:
        f.write(json.dumps(result_ls))
# ping_domain('www.baidrrrru.com')
# ping_domain('www.baidu.com')
# ping_domain('www.sina123456.com')
def main():
    """Run the domain check against the configured input file."""
    # filename = r'D:\tmp\unclass_test'
    source_file = r'D:\tmp\unclass'
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(handle_file(source_file))
if __name__ == '__main__':
    start_time = time.time()
    main()
    # report the total runtime in seconds
    print(time.time() - start_time)
| true |
5590ac623b8b6e64da126c90f6b7c435ac99501e | Python | irisfffff/SentimentAnalysis-MovieReviews | /spaCy.py | UTF-8 | 305 | 2.765625 | 3 | [
"MIT"
] | permissive | import spacy
# Load the small English pipeline and tokenize a noisy social-media string.
spacy_nlp = spacy.load("en_core_web_sm")
article = "OMG #Twitter is sooooo coooool <3 :-) <– lol...why do i write like this idk right? :) 🤷♀️😂🤖"
parsed = spacy_nlp(article)
token_texts = [tok.text for tok in parsed]
print('Original Article: %s' % article)
print()
print(token_texts)
80df1388a2faec33f9d6868864d151e31d5c4e0f | Python | TPose-Labs/Smart_mirror_interface | /src/utils.py | UTF-8 | 1,821 | 2.859375 | 3 | [] | no_license | from tkinter import Tk, Frame
# Abbreviated -> full weekday names.
# BUG FIX: the literal previously listed "Sun" twice; the duplicate key
# silently overwrote the first entry.
DAYS = {
    "Sun": "Sunday",
    "Mon": "Monday",
    "Tue": "Tuesday",
    "Wed": "Wednesday",
    "Thu": "Thursday",
    "Fri": "Friday",
    "Sat": "Saturday",
}
# Abbreviated -> full month names.
MONTHS = {
    "Jan": "January",
    "Feb": "February",
    "Mar": "March",
    "Apr": "April",
    "May": "May",
    "Jun": "June",
    "Jul": "July",
    "Aug": "August",
    "Sep": "September",
    "Oct": "October",
    "Nov": "November",
    "Dec": "December"
}
# Screen side -> (row, column) in the 2x2 layout grid.
LOCATIONS = {
    "left": (0, 0),
    "bottom": (1, 0),
    "right": (0, 1),
    "rbot": (1, 1)
}
# Screen side -> tkinter anchor letter.
sides = {
    "left": "w",
    "top": "n",
    "bottom": "s",
    "right": "e",
    "rbot": "e"
}
def remove_unicode(_str):
    """Return *_str* with characters above the Basic Multilingual Plane
    (code point >= U+10000, e.g. most emoji) removed.

    Replaces the previous quadratic `s += c` rebuild with a single join.
    """
    return ''.join(ch for ch in _str if ord(ch) < 65536)
def overrides(interface_class):
    """Decorator factory: assert that the decorated method's name exists on
    *interface_class*, i.e. that it really overrides a member."""
    def checker(method):
        # dir() also lists inherited members, so inherited methods count.
        assert method.__name__ in dir(interface_class)
        return method
    return checker
class Container:
    """Full-screen black tkinter window that hosts display modules in a grid."""

    def __init__(self):
        self.root = Tk()
        self.modules = []
        self.root.attributes('-fullscreen', True)
        # Escape closes the window.
        self.root.bind('<Escape>', lambda e: self.root.destroy())
        self.root.configure(background="black")

    def add_module(self, module, side, **kwargs):
        """Instantiate *module* in its own frame and grid it at *side*.

        side is a key of LOCATIONS; an optional 'stocks' kwarg is forwarded
        to modules that take a stock list.  (De-duplicated the constructor
        call and replaced `in kwargs.keys()` with plain membership.)
        """
        frame = Frame(self.root, bg="black")
        if "stocks" in kwargs:
            instance = module(frame, side, kwargs["stocks"])
        else:
            instance = module(frame, side)
        self.modules.append(instance)
        row, column = LOCATIONS[side]
        instance.grid(row=row, column=column)

    def start(self):
        """Enter the tkinter main loop (blocks until the window closes)."""
        self.root.mainloop()
| true |
3a9c8ac9d54a15245adbd1793c2824e734aca287 | Python | chenchuk77/pokerbot | /__OLD/plot_tester.py | UTF-8 | 3,258 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is dedicated to the public domain under the CC0 license.
"""
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
# local python resources
import credentials
import db
from datetime import datetime, timedelta, time
import matplotlib.pyplot as plt
import numpy as np
from db import Record
from prettytable import PrettyTable
import logging
from telegram import ParseMode
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
ConversationHandler, JobQueue)
def tester_generate_graph(club_name):
    """Plot the full balance history of *club_name* and save it to
    graph-<club_name>.png."""
    history = list(
        Record.select().where(Record.club == club_name).order_by(Record.date).execute())
    dates = [entry.date for entry in history]
    balances = [entry.balance for entry in history]
    plt.plot(np.array(dates), np.array(balances))
    # plt.show()
    plt.savefig('graph-{}.png'.format(club_name))
def tester_generate_graph(club_name, until, days_count):
    """Build a gap-free, day-by-day balance series for *club_name*.

    Covers the days_count days ending at *until*.  Days with no stored
    record reuse the most recent known balance.  Returns (dates, balances)
    so the caller can plot them.

    NOTE(review): this redefines (and shadows) the one-argument
    tester_generate_graph above -- confirm which one callers expect.

    BUG FIX: removed two leftover pseudo-code lines that made the module
    unparseable, and last_balance is now updated as records are seen so a
    gap really carries the previous balance forward (it was stuck at 0).
    """
    since = until - timedelta(days=days_count)

    query = Record.select().where(
        (Record.club == club_name) & (Record.date >= since) & (Record.date <= until)).order_by(Record.date).execute()

    # Map each recorded date to its balance for O(1) lookups below.
    dates_balances = {record.date: record.balance for record in query}

    dates = []
    balances = []
    last_balance = 0  # TODO: find the initial balance if no record exists at `since`
    for single_date in (since + timedelta(n) for n in range(days_count)):
        dates.append(single_date)
        if single_date in dates_balances:
            last_balance = dates_balances[single_date]
        # otherwise assume the last known balance still holds
        balances.append(last_balance)
    return dates, balances
def main():
    """Ad-hoc demo: pair dates with balances and plot them."""
    dates = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    balances = [1, 1, 1, 2, 3, 4, 4, 4, 5]
    # BUG FIX: the old comprehension `{dates[i]: balances[i] for i in dates}`
    # used the date *values* as list indices, overrunning both lists
    # (dates[9] -> IndexError).  zip pairs the two lists positionally.
    dates_balances = dict(zip(dates, balances))
    print(dates_balances)

    x = np.array(dates)
    y = np.array(balances)
    plt.plot(x, y)
    plt.show()
    # plt.savefig('graph-{}.png'.format(club_name))
### WORKING - PLOT A DICT
# In [12]: x = np.array(dates)
# ...: y = np.array(balances)
# ...: plt.plot(*zip(*dates_balances.items()))
# ...: plt.show()
if __name__ == '__main__':
    # Run the plotting demo when executed directly.
    main()
| true |
e9976883a7ac28fc3ef5d8afadad3cc2ade27037 | Python | budebulai/LightGCS | /tools/sql_tool.py | UTF-8 | 11,750 | 3.21875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import os
import sqlite3
from functools import wraps
import copy
"""
待优化:
1、字符串拼接时保留引号
劣法:参数填充时字符串值使用单双引号两层包裹
最优:
values = [str(tuple(item)) for item in values]
values = ",".join(values)
较优:对需要保留引号的字符串检出并更改为"'xxx'"形式,怎么实现呢?
def str_convert(s):
return "'" + s + "'"
# for i in range(len(values)):
# for j in range(len(values[i])):
# if isinstance(values[i][j],str):
# values[i][j] = '"' + values[i][j] + '"'
次优:替换法,在字符串值的前后增加特殊字符,待拼接完成后再替换为引号
2、
"""
"""
SQL:结构化查询语言
1、DDL语言(数据定义语言)
用来定义数据库、数据表、视图、索引、触发器
create alter drop
2、DML语言(数据操纵语言)
用于插入、更新、删除数据
insert update delete truncate
3、DQL语言(数据查询语言)
查询数据库中的数据
select
4、DCL语言(数据控制语言)
用来控制用户的访问权限
grant revoke
MySQL数据类型:
数值:TINYINT SMALLINT MEDIUMINT INT BIGINT FLOAT DOUBLE DECIMAL
字符串: CHAR VARCHAR TINYTEXT TEXT
日期、时间: DATE TIME DATETIME TIMESTAMP YEAR
NULL
注:
int(4),显示长度4位,zerofill填充0,99 --> 0099。 int(4) zerofill
float(5,2),总长度为5,小数2位
sqlite数据类型
ALTER TABLE XXX AUTO_INCREMENT=10;
"""
# Path of the motor SQLite database file (rotor_db\motors.db next to this module).
motor_db = os.path.split(os.path.realpath(__file__))[0] + "\\rotor_db\\motors.db"
def connect_db(db):
    """
    sqlite3 decorator factory.

    The wrapped function returns an SQL statement; the wrapper opens a
    connection to *db*, executes the statement, commits, and returns the
    fetched result rows (meaningful for SELECT).  On any error the
    exception text is printed and None is returned.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            conn = sqlite3.connect(db)
            cur = conn.cursor()
            try:
                # Get the SQL statement built by the wrapped function
                sql = func(*args, **kwargs)
                # Execute the statement
                cur.execute(sql)
                # Autocommit is not enabled, so commit manually here.
                conn.commit()
                # Fetch the query result set
                ret = cur.fetchall()
            except Exception as e:
                # Printed for debugging; errors are swallowed and None is returned.
                print str(e)
                return None
            finally:
                # Close the cursor and the connection
                cur.close()
                conn.close()
            # Return the query result; meaningful for SELECT statements
            return ret
        return wrapper
    return decorator
"""
DDL
"""
@connect_db(motor_db)
def drop_db(params):
    """
    params = {"database":database}

    NOTE(review): DROP DATABASE is MySQL syntax; sqlite3 (which connect_db
    uses) has no such statement -- dropping a SQLite database means
    deleting its file.  Executing this makes the decorator print an error
    and return None.  Confirm the intended backend.
    """
    return "DROP DATABASE IF EXISTS {};".format(params["database"])
@connect_db(motor_db)
def drop_table(params):
    """Build a DROP TABLE statement.

    params = {"table": tablename}
    """
    table_name = params["table"]
    return "DROP TABLE IF EXISTS {};".format(table_name)
@connect_db(motor_db)
def create_table(params):
    """Build a CREATE TABLE statement.

    params = {"table": tablename,
              "fields": list of column definitions, e.g.
                        ["ID int PRIMARY KEY NOT NULL", "name text"]}
    """
    column_defs = ",".join(params["fields"])
    return "CREATE TABLE IF NOT EXISTS {}({});".format(params["table"], column_defs)
@connect_db(motor_db)
def alter_table(params):
    """Build an ALTER TABLE statement.

    params = {"table": tablename,
              "action": one of "CHANGE", "MODIFY", "RENAME AS", "ADD", "DROP"
                        (optional, defaults to empty),
              "fields": the rest of the clause, e.g. "AUTO_INCREMENT=10",
                        "new_table_name", or "column_name column_type"}
    """
    return "ALTER TABLE {} {} {};".format(
        params["table"], params.get("action", ""), params["fields"])
"""
DML
"""
@connect_db(motor_db)
def insert_items(params):
    """Build an INSERT statement.

    params = {"table": tablename,
              "fields": optional list of column names,
              "values": list of rows, e.g. [[1, "a"], [2, "b"]]}

    Without "fields" the VALUES tuples must cover every column.
    """
    table = params["table"]
    fields = params.get("fields", "")
    # deepcopy so the caller's value lists are not mutated by the quoting below
    values = copy.deepcopy(params["values"])
    if len(fields) == 1:
        # Single-column insert: rows may be bare values rather than tuples.
        if len(values) == 1:
            if isinstance(values[0], str):
                values[0] = '"' + values[0] + '"'
            values = "({})".format(values[0])
        else:
            values = [value for item in values for value in item]
            for i in range(len(values)):
                if isinstance(values[i], str):
                    # BUG FIX: this previously quoted values[0] instead of
                    # values[i], leaving every other string value unquoted.
                    values[i] = '"' + values[i] + '"'
                values[i] = "({})".format(values[i])
            values = ",".join(values)
    else:
        # str(tuple(...)) renders each row as a parenthesised, quoted tuple.
        values = [str(tuple(item)) for item in values]
        values = ",".join(values)
    if fields:
        fields = "(" + ",".join(fields) + ")"
    return "INSERT INTO {} {} VALUES{};".format(table, fields, values)
@connect_db(motor_db)
def update_table(params):
    """Build an UPDATE statement.

    params = {"table": tablename,
              "fields": {"col1": value, ...},
              "condition": optional WHERE body (without the WHERE keyword)}
    """
    assignments = []
    for column, value in params["fields"].items():
        if isinstance(value, str):
            value = '"' + value + '"'
        assignments.append("{}={}".format(column, value))
    where = params.get("condition", "")
    if where:
        where = "WHERE " + where
    return "UPDATE {} SET {} {};".format(params["table"], ",".join(assignments), where)
@connect_db(motor_db)
def delete_items(params):
    """Build a DELETE statement.

    params = {"table": tablename,
              "condition": optional WHERE body (without the WHERE keyword)}
    """
    where = params.get("condition", "")
    if where:
        where = "WHERE " + where
    return "DELETE FROM {} {};".format(params["table"], where)
@connect_db(motor_db)
def truncate_table(params):
    """Empty a table while keeping its structure, indexes and constraints.

    params = {"table": tablename}

    BUG FIX: TRUNCATE is MySQL syntax; sqlite3 (which connect_db uses)
    rejects it, so the old statement always errored out.  An unqualified
    DELETE is SQLite's equivalent and triggers its internal truncate
    optimization.  Note: unlike MySQL TRUNCATE, this does not reset an
    AUTOINCREMENT counter (stored in sqlite_sequence).
    """
    return "DELETE FROM {};".format(params["table"])
"""
DQL
"""
@connect_db(motor_db)
def show_tables():
    """Build a query listing all table names in the SQLite database."""
    return "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;"
@connect_db(motor_db)
def table_query(params):
    """Build a SELECT statement.

    params = {"table": tablename,
              "fields": optional list of column names (defaults to *),
              "condition": optional WHERE body (without the WHERE keyword)}
    """
    columns = params.get("fields", "")
    if not columns:
        columns = "*"
    columns = ",".join(columns)
    where = params.get("condition", "")
    if where:
        where = "WHERE " + where
    return "SELECT {} FROM {} {};".format(columns, params["table"], where)
@connect_db(motor_db)
def head_query(params):
    """Build a PRAGMA query returning the column metadata of a table.

    params = {"table": tablename}
    """
    return "PRAGMA table_info({});".format(params["table"])
"""
视图
示例:创建master_view
CREATE VIEW master_view
AS
SELECT id,name FROM student;
SELECT * FROM master_view;
DESC master_view;
SHOW CREATE VIEW master_view;
ALTER VIEW master_view
AS
SELECT id,name,email FROM student;
UPDATE master_view SET xx=11 WHERE xx=xxx;
DROP VIEW master_view;
"""
"""
事务
将一组语句SQL放在同一批次内去执行,如果一个SQL语句出错,则该批次内的所有SQL都将被取消执行
如银行转帐,中途出现错误,全部回滚
MySQL事务处理只支持InnoDB和BDB数据表类型
ACID:
原子性(atomic)
一致性(consist)
隔离性(isolated)
持久性(durable)
关闭自动提交
SELECT @@autocommit;
SET autocommit=0;
MySQL事务控制
START TRANSACTION;
语句(组)
COMMIT;
ROLLBACK;
SET autocommit=1;
sqlite3事务控制
使用下面的命令来控制事务:
BEGIN TRANSACTION:开始事务处理。
COMMIT:保存更改,或者可以使用 END TRANSACTION 命令。
ROLLBACK:回滚所做的更改。
事务控制命令只与 DML 命令 INSERT、UPDATE 和 DELETE 一起使用。他们不能在创建表或删除表时使用,因为这些操作在数据库中是自动提交的。
"""
"""
触发器
四要素:
1、监视地点table
2、监视事件insert/update/delete
3、触发时间after/before
4、触发事件insert/update/delete
CREATE TRIGGER triggerName
{BEFORE | AFTER}
{INSERT | UPDATE | DELETE}
ON tablename
FOR EACH ROW
BEGIN
触发器SQL语句;
END;
DROP TRIGGER triggerName;
"""
def create_table_motorList():
    """Create the motorList table (a single primary-key Motor column)."""
    create_table({
        "table": "motorList",
        "fields": ["Motor varchar(50) PRIMARY KEY NOT NULL"],
    })
def drop_table_motorList():
    """Drop the motorList table if it exists."""
    drop_table({"table": "motorList"})
def create_table_motorData():
    """Create the motorData table holding per-throttle measurement rows."""
    create_table({
        "table": "motorData",
        "fields": [
            "Motor VARCHAR(50)",
            "Voltage FLOAT(5,2)",
            "Propeller INT(6)",
            "Throttle VARCHAR(4)",
            "Amps FLOAT(5,2)",
            "Watts INT(6)",
            "Thrust FLOAT(8,2)",
            "RPM INT(5)",
            "Moment FLOAT(5,2)",
            "Efficiency FLOAT(5,2)",
        ],
    })
def drop_table_motorData():
    """Drop the motorData table if it exists."""
    drop_table({"table": "motorData"})
def create_table_motorInfo():
    """Create the motorInfo table of per-motor specifications."""
    create_table({
        "table": "motorInfo",
        "fields": [
            "Motor VARCHAR(50) PRIMARY KEY NOT NULL",
            "Producer VARCHAR(50)",
            "Type VARCHAR(50)",
            "KV VARCHAR(10)",
            "Voltage FLOAT(5,2)",
            "Amps FLOAT(5,2)",
            "Watts INT(6)",
            "Resistor FLOAT(4,2)",
            "AmpNoLoad FLOAT(4,2)",
        ],
    })
def drop_table_motorInfo():
    """Drop the motorInfo table if it exists."""
    drop_table({"table": "motorInfo"})
def create_table_propellerInfo():
    """Create the propellerInfo table of per-propeller coefficients."""
    create_table({
        "table": "propellerInfo",
        "fields": [
            "Producer VARCHAR(50)",
            "Propeller INT(6)",
            "Type VARCHAR(50)",
            "cT FLOAT(6,2)",
            "cM FLOAT(6,2)",
        ],
    })
def drop_table_propellerInfo():
    """Drop the propellerInfo table if it exists."""
    drop_table({"table": "propellerInfo"})
| true |
e7466ee45b01b929429bc53eae66f70640be4690 | Python | michaeldmoser/Backcountry-Tracks | /services/Adventurer/adventurer/users.py | UTF-8 | 1,962 | 3 | 3 | [] | no_license | import uuid
import copy
class Users(object):
    """Repository of user profiles stored in a key/value bucket.

    Each user is stored twice: the profile under a UUID key, and a small
    reference object under the e-mail address that points at that UUID,
    so users can be looked up either way.
    """
    def __init__(self, bucket = None):
        # bucket: key/value store client exposing get/new + object
        # exists/get_data/set_data/set_usermeta/store (Riak-style API).
        self.bucket = bucket

    def get_by_id(self, user_id):
        '''Retrieve a user by their id; raises KeyError if absent.'''
        userobj = self.bucket.get(str(user_id))
        if not userobj.exists():
            raise KeyError("No such user")

        user = userobj.get_data()
        return user

    def get_by_email(self, email):
        '''Retrieve a user via the e-mail -> key reference object.'''
        email_ref_obj = self.bucket.get(str(email))
        if not email_ref_obj.exists():
            raise KeyError('No such user')

        email_ref = email_ref_obj.get_data()
        return self.get_by_id(email_ref['key'])

    def __save_the_user(self, user_data, key):
        # Merge into the existing profile if present, else create it.
        userobj = self.bucket.get(str(key))
        if userobj.exists():
            olddata = userobj.get_data()
            olddata.update(user_data)
            data_to_store = olddata
        else:
            userobj = self.bucket.new(key, user_data)
            user_data['key'] = key
            data_to_store = user_data

        userobj.set_data(data_to_store)
        userobj.set_usermeta({
            'object_type': 'user_profile'
        })
        userobj.store()
        return data_to_store

    def __save_email_reference(self, key, email):
        # Only create the reference once; an existing reference is kept.
        email_ref = self.bucket.get(str(email))
        if not email_ref.exists():
            email_ref = self.bucket.new(email, {'key': key})
            email_ref.store()

    def save(self, user_data = None, user_id = None):
        '''Save a user object to the database and return the stored data.

        Fixed the mutable default argument: user_data previously defaulted
        to a shared {} which __save_the_user mutates (it sets
        user_data['key']).  Behavior is unchanged for all callers.
        '''
        user_data = {} if user_data is None else user_data
        # str() turns a missing e-mail (None) into "None", which fails the
        # length check below and triggers the error -- fragile but kept.
        email = str(user_data.get('email', None))
        if len(email) < 5:
            raise Exception("An email address must be provided.")

        key = user_id if user_id is not None else str(uuid.uuid4())
        user_data = self.__save_the_user(user_data, key)
        self.__save_email_reference(key, email)

        return user_data
| true |
39c627b25c886d08677459446df54dd499b221f1 | Python | Preethi-design/python_assignment_1 | /dictionary_TBI.py | UTF-8 | 1,235 | 4.40625 | 4 | [] | no_license | print("Dictionary Methods")
# Demonstration script: exercises the built-in dict methods one by one,
# printing a "#method" marker before each example.
d = {1: "one", 2: "two"}
print(d)
print("#clear")
d.clear()
print('d =', d)
print("#copy()")
original = {1:'one', 2:'two'}
new = original.copy()
# BUG FIX: user-facing label was misspelled "Orignal".
print('Original: ', original)
print('New: ', new)
print("#From Keys")
keys = {'a', 'e', 'i', 'o', 'u' }
vowels = dict.fromkeys(keys)  # every value defaults to None
print(vowels)
print("#get()")
person = {'name': 'Phill', 'age': 22}
print('Name: ', person.get('name'))
print('Age: ', person.get('age'))
print("#items()")
sales = { 'apple': 2, 'orange': 3, 'grapes': 4 }
print(sales.items())
print("#keys")
person = {'name': 'Phill', 'age': 22, 'salary': 3500.0}
print(person.keys())
print("#popitem()")
person = {'name': 'Phill', 'age': 22, 'salary': 3500.0}
result = person.popitem()  # removes and returns the last-inserted pair
print('Return Value = ', result)
print('person = ', person)
print("#setdefault")
person = {'name': 'Phill', 'age': 22}
age = person.setdefault('age')  # key exists, so the dict is unchanged
print('person = ',person)
print('Age = ',age)
print("#pop")
sales = { 'apple': 2, 'orange': 3, 'grapes': 4 }
element = sales.pop('apple')
print('The popped element is:', element)
print('The dictionary is:', sales)
print("#update")
d = {1: "one", 2: "three"}
d1 = {2: "two"}
d.update(d1)  # overwrites key 2
print(d)
print("#add")
d1 = {3: "three"}
d.update(d1)  # update with a new key adds it
print(d)
| true |
7ff350cf75f3582215b776a9c6f524dfdca9d706 | Python | donniet/ros_pantilt_pkg | /scripts/track.py | UTF-8 | 4,049 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
from functools import partial
import rospy
from pantilt_pkg.msg import Detect
from geometry_msgs.msg import Pose, Quaternion, Vector3
from pantilt_pkg.msg import Pose
# from here: https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
def bb_intersection_over_union(boxA, boxB):
    """Compute the intersection-over-union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices, hence the +1
    terms.  Adapted from:
    https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
    """
    # Corners of the intersection rectangle.
    left = max(boxA[0], boxB[0])
    top = max(boxA[1], boxB[1])
    right = min(boxA[2], boxB[2])
    bottom = min(boxA[3], boxB[3])

    # Clamp to zero when the boxes do not overlap.
    inter_area = max(0, right - left + 1) * max(0, bottom - top + 1)

    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    # Union = sum of areas minus the double-counted intersection.
    return inter_area / float(area_a + area_b - inter_area)
class Tracker(object):
    """Keeps a pan/tilt camera aimed at a tracked object.

    process_detection / process_position are used as ROS subscriber
    callbacks; corrected Pose commands are published through *publisher*.
    """
    # Current camera orientation (updated from pose messages); units match
    # the incoming Pose messages -- presumably degrees, confirm with hardware.
    yaw = 0
    pitch = 0
    publisher = None
    # Camera field of view [horizontal, vertical] in degrees -- assumed; verify.
    fov = [48.8, 48.8]
    # Last matched detection box; appears to be normalized
    # [ymin, xmin, ymax, xmax] given how adjust_position reads it -- TODO confirm.
    box = [0, 0, 1, 1]
    obj = [0, 0, 0]   # NOTE(review): never used
    score = 1
    initial = True    # NOTE(review): never used

    def __init__(self, yaw, pitch, publisher):
        self.yaw = yaw
        self.pitch = pitch
        self.pub = publisher

    def process_detection(self, detect):
        """Pick the detection best matching the tracked box and re-aim."""
        self.detect_stamp = detect.stamp
        if detect.num == 0:
            return
        max_iou = 0
        max_index = -1
        for i in range(detect.num):
            iou = bb_intersection_over_union(self.box, detect.boxes[i].coords)
            if iou > max_iou:
                max_iou = iou
                max_index = i
        if max_index < 0 and detect.scores[0] > 0.5:
            # Nothing overlaps the tracked box: fall back to the first
            # confident detection.
            self.box = detect.boxes[0].coords
            self.score = detect.scores[0]
            self.adjust_position()
        elif max_index >= 0 and detect.scores[max_index] > 0.5:
            # BUG FIX: this branch previously ran with max_index == -1 as
            # well, silently indexing scores[-1]/boxes[-1] (the *last*
            # detection) instead of the matched one.
            self.box = detect.boxes[max_index].coords
            self.score = detect.scores[max_index]
            self.adjust_position()

    def process_position(self, position):
        """Record the camera's reported orientation."""
        self.yaw = position.yaw
        self.pitch = position.pitch
        self.pose_stamp = position.stamp

    def adjust_position(self):
        """Publish a pose that nudges the camera toward the box center."""
        # Offset of the box center from the image center, in [-0.5, 0.5].
        x = 0.5 * (self.box[3] + self.box[1]) - 0.5
        y = 0.5 * (self.box[2] + self.box[0]) - 0.5
        # Convert to an angular correction using the field of view.
        dx = -x * self.fov[0]
        dy = y * self.fov[1]
        # Soften the correction to avoid overshooting.
        dx /= 5
        dy /= 5
        # TODO: bounds checking should use feedback from the pantilt
        # camera's actual position (query the camera node).
        pitch = self.pitch + dy
        yaw = self.yaw + dx
        p = Pose()
        p.stamp = self.detect_stamp
        p.pitch = pitch
        p.yaw = yaw
        self.pub.publish(p)
def track(args):
    """Start the tracker node: subscribe to detections and camera poses,
    publish corrected pan-tilt poses, and spin until shutdown."""
    rospy.init_node('object_tracker', anonymous=True)
    pose_pub = rospy.Publisher('pantiltPose', Pose, queue_size=1)
    tracker = Tracker(args.initial_yaw, args.initial_pitch, pose_pub)
    # Bound methods serve directly as callbacks (the lambdas were redundant).
    rospy.Subscriber('detections', Detect, tracker.process_detection)
    rospy.Subscriber('pantilt', Pose, tracker.process_position)
    rospy.spin()
if __name__ == '__main__':
    # Parse only the arguments we know; ROS may inject extra ones.
    parser = argparse.ArgumentParser()
    parser.add_argument('--initial_yaw', type=float, default=175, help='initial yaw for pantilt camera')
    parser.add_argument('--initial_pitch', type=float, default=120, help='initial pitch for pantilt camera')
    args, _ = parser.parse_known_args()
    track(args)
| true |
76d362c2de9c8f04cb7f6f1c6760f4bb77d14d28 | Python | palmarytech/Python_Snippet | /Snap7_Exer/Test.py | UTF-8 | 3,960 | 2.515625 | 3 | [] | no_license | import snap7.client
import mySnap7, byte_array
# Column names used in DB_format.csv describing the PLC data-block layout.
nameKey = "name"
dataTypeKey = "datatype"
offsetKey = "offset"
if __name__ == "__main__":
    # Integration smoke test against a live S7 PLC: connect, read the whole
    # data block and individual tags, then write values back by tag and by
    # raw offset.  Requires the PLC at 10.101.100.45 and DB_format.csv.
    # =================== Connection ======================
    plc = snap7.client.Client()
    plc.connect('10.101.100.45', 0, 0)
    # =================== Load Config ======================
    with open('DB_format.csv', 'r', encoding="utf-8") as f_read:
        strContent = f_read.read()
    dictSortedDB_items = mySnap7.csv2dict(strContent)
    # =================== Read the whole DB ======================
    # get the max length(offset value) in datablock
    nLength = mySnap7.get_db_size(dictSortedDB_items, dataTypeKey, offsetKey)
    print("nLength: ", nLength)
    # Read the whole db value
    objDB_Results = mySnap7.DBRead(plc, 7, nLength, dictSortedDB_items)
    for tag_name, value in objDB_Results.__dict__.items():
        print(tag_name,": ", value)
    # =================== Read one single tag from DB ======================
    # Offsets below follow DB_format.csv; 4.1 addresses bit 1 of byte 4.
    value = mySnap7.DBReadTag(plc, 7, 0, "Real")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 4, "Bool")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 4.1, "Bool")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 6, "Int")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 8, "String")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 264, "DInt")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 268, "UDInt")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 272, "Int")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 274, "UInt")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 276, "SInt")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 277, "USInt")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 278, "Byte")
    print("value: ", value)
    value = mySnap7.DBReadTag(plc, 7, 280, "DTL")
    print("value: ", value)
    # =================== Write one single value to DB by tag ======================
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["Temperature"], 1111)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["Cold"], 1)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["Hot"], 0)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["Rpis_to_Buy"], 9)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["Notes"], "hiHiHi")
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_DINT"], -1234567)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_UDINT"], 1234567)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_INT"], -1234)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_UINT"], 1234)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_SINT"], -123)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_USINT"], 123)
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_Byte"], 101)
    import datetime
    mySnap7.DBWriteByTag(plc, 7, dictSortedDB_items["var_DTL"], datetime.datetime.now())
    # =================== Write one single value to DB by offset ======================
    mySnap7.DBWriteByOffset(plc, 7, 0, "Real", 1111)
    # mySnap7.DBWriteByOffset(plc, 7, 4, "Bool", 1)
    # mySnap7.DBWriteByOffset(plc, 7, 4.1, "Bool", 0)
    mySnap7.DBWriteByOffset(plc, 7, 6, "Int", 100)
    mySnap7.DBWriteByOffset(plc, 7, 8, "String", "Hello Snap7")
    mySnap7.DBWriteByOffset(plc, 7, 264, "DInt", -765443321)
    mySnap7.DBWriteByOffset(plc, 7, 268, "UDInt", 765443321)
    mySnap7.DBWriteByOffset(plc, 7, 272, "Int", -9876)
    mySnap7.DBWriteByOffset(plc, 7, 274, "UInt", 9876)
    mySnap7.DBWriteByOffset(plc, 7, 276, "SInt", -125)
    mySnap7.DBWriteByOffset(plc, 7, 277, "USInt", 125)
    mySnap7.DBWriteByOffset(plc, 7, 278, "Byte", 100)
    mySnap7.DBWriteByOffset(plc, 7, 280, "DTL", datetime.datetime.now())
    plc.disconnect()
a1e311506afd0298c3bc550b03350302183010e2 | Python | zoraZz/Test | /clearText.py | UTF-8 | 419 | 2.796875 | 3 | [] | no_license | from selenium import webdriver
import time
# Selenium smoke test: search on baidu.com, then clear the input and refresh.
driver = webdriver.Chrome()
driver.maximize_window()
driver.get('http://www.baidu.com')
driver.find_element_by_id('kw').send_keys('selenium')
time.sleep(5)
try:
    # clear the text field's contents
    driver.find_element_by_id('kw').clear()
    # refresh the current page
    driver.refresh()
    print('test passed')
except Exception as e:
    print('test failed',format(e))
# driver.quit()
2798d8048ad724bb84de7e26f4b6df9057ba9284 | Python | marszed1997/LeetCode | /LeetCode 820.py | UTF-8 | 1,290 | 3.375 | 3 | [] | no_license | # https://leetcode-cn.com/problems/short-encoding-of-words/
class Trie:
    """Root of a character trie stored as nested dicts (char -> child dict).

    Replaces the previous 26 x 100000 preallocated integer matrix, which
    cost several megabytes per instance and silently overflowed past
    100000 nodes; the dict trie grows on demand.
    """

    def __init__(self):
        self.root = {}


class Solution:
    """LeetCode 820: minimum length of the reference-string encoding.

    A word contributes len(word) + 1 ('#') unless it is a suffix of a
    longer word, which is detected by inserting reversed words into a trie.
    """

    def __init__(self):
        self.T = Trie()

    def InTrie(self, word):
        """Return True if *word* already traces a path from the trie root
        (i.e. it is a prefix of some previously inserted reversed word)."""
        node = self.T.root
        for ch in word:
            if ch not in node:
                return False
            node = node[ch]
        return True

    def minimumLengthEncoding(self, words):
        """
        :param words: List[str]
        :return: int
        """
        ans = 0
        # Longest first, so every suffix is seen after its superstring.
        words.sort(key=lambda word: len(word), reverse=True)
        for word in words:
            word = word[::-1]
            if self.InTrie(word):
                # Already a suffix of a counted word: costs nothing extra.
                continue
            node = self.T.root
            for ch in word:
                node = node.setdefault(ch, {})
            ans += 1 + len(word)  # word plus its '#' terminator
        return ans
d1aa75e96d0ea6c17b9509e0f30cfecd30be5707 | Python | falecomlara/CursoEmVideo | /ex009 - tabuada.py | UTF-8 | 245 | 4.09375 | 4 | [] | no_license | #entre com um número e retorne sua tabuada
# Read a number and print its multiplication table from 0 to 10.
# (Removed the unused `contador` counter and the redundant manual `n2`
# counter -- the loop variable provides the multiplier directly.)
n1 = int(input('Entre com um número: '))
for n2 in range(11):
    print('A tabuada de {}x{}={}'.format(n1, n2, n1 * n2))
668c34fcb78b85c3ab383b712cb6fd889c94c944 | Python | UWPCE-PythonCert-ClassRepos/Self_Paced-Online | /students/njschafi/Lesson07/html_render.py | UTF-8 | 3,244 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env python3
# NEIMA SCHAFI, LESSON 7 Assignment - HTML RENDERER
"""
A class-based system for rendering html.
"""
# This is the framework for the base class
class Element(object):
    """Base html element: a tag plus a list of child content.

    Content items may be strings or other Elements; keyword arguments
    become tag attributes.
    """
    tag = 'html'
    indent = '  '

    def __init__(self, content=None, **kwargs):
        """Initialize with an optional first piece of content and attributes.

        (Collapsed the redundant `elif content is None` branch into a
        conditional expression.)
        """
        self.stuff = [] if content is None else [content]
        self.attributes = kwargs

    def append(self, new_content):
        """Add another piece of content (string or Element)."""
        self.stuff.append(new_content)

    def render(self, out_file, cur_int=""):
        """Write the element and its children to out_file.

        cur_int is the indentation prefix for this nesting level; children
        are indented one extra level.
        """
        out_file.write(cur_int + '<{}'.format(self.tag))
        for key, value in self.attributes.items():
            out_file.write(' {}="{}"'.format(key, value))
        out_file.write('>\n')
        for item in self.stuff:
            if isinstance(item, Element):
                item.render(out_file, cur_int + self.indent)
                out_file.write('\n')
            else:
                out_file.write(cur_int + self.indent + item + '\n')
        out_file.write(cur_int + '</{}>'.format(self.tag))
class Html(Element):
    """Top-level document element; prepends the DOCTYPE declaration."""
    tag = 'html'

    def render(self, out_file, cur_int=""):
        """Write the DOCTYPE line, then render normally."""
        out_file.write(cur_int + "<!DOCTYPE html>\n")
        # Idiom: super() instead of the explicit Element.render(self, ...).
        super().render(out_file, cur_int)
class Body(Element):
    # <body> section of the document
    tag = 'body'
class P(Element):
    # paragraph element
    tag = 'p'
class Head(Element):
    # <head> section of the document
    tag = 'head'
class OneLineTag(Element):
    """Element rendered entirely on one line, e.g. <title>text</title>."""

    def render(self, out_file, cur_int=""):
        """Write the tag, attributes and text content without line breaks."""
        out_file.write(cur_int + '<{}'.format(self.tag))
        for name, val in self.attributes.items():
            out_file.write(' {}="{}"'.format(name, val))
        out_file.write('>')
        for piece in self.stuff:
            out_file.write(piece)
        out_file.write('</{}>'.format(self.tag))

    def append(self, content):
        # One-line tags take their content at construction time only.
        raise NotImplementedError
class Title(OneLineTag):
    # <title> element, rendered on a single line
    tag = 'title'
class SelfClosingTag(Element):
    """Element with no content, rendered as a single self-closing tag."""

    def render(self, out_file, cur_int=""):
        """Render as <tag attrs />; raise TypeError if content was supplied.

        BUG FIX: the content check now happens *before* anything is
        written, so a TypeError no longer leaves a partial tag in the
        output stream.
        """
        if self.stuff:
            raise TypeError
        out_file.write(cur_int + '<{}'.format(self.tag))
        for key, value in self.attributes.items():
            out_file.write(' {}="{}"'.format(key, value))
        out_file.write(' />')
class Hr(SelfClosingTag):
    # horizontal rule, rendered as <hr />
    tag = 'hr'
class Br(SelfClosingTag):
    # line break, rendered as <br />
    tag = 'br'
class A(OneLineTag):
    """Anchor element: A(link, content) renders <a href="link">content</a>."""
    tag = 'a'
    def __init__(self, link, content=None, **kwargs):
        # Store the link target as the href attribute.
        kwargs['href'] = link
        super().__init__(content, **kwargs)
class Ul(Element):
    # unordered list container
    tag = 'ul'
class Li(Element):
    # list item
    tag = 'li'
class H(OneLineTag):
    """Heading element: H(2, "text") renders <h2>text</h2>."""
    tag = 'h'

    def __init__(self, level, content=None, **kwargs):
        super().__init__(content, **kwargs)
        self.level = level
        # Instance tag overrides the class default with the sized heading.
        self.tag = 'h{}'.format(level)
class Meta(SelfClosingTag):
    # <meta /> element for document metadata
    tag = 'meta'
| true |
aaf7d257d113cd34596dc8e00e4b55e60a62293c | Python | Chive/adventofcode | /day1/counter.py | UTF-8 | 1,301 | 4.03125 | 4 | [] | no_license | import sys
def sum_recurring_digits(sequence: str):
"""
Reviews a sequence of digits and finds the sum of all digits that match the
next digit in the list. The list is circular, so the digit after the last
digit is the first digit in the list.
"""
total = 0
i = 0
count = len(sequence)
while i < count:
digit = int(sequence[i])
if i + 1 < count:
next_digit = sequence[i + 1]
else:
# the list is circular,
# check the first digit
next_digit = sequence[0]
next_digit = int(next_digit)
if digit == next_digit:
total += digit
i += 1
return total
def test():
    """Self-check with the four worked examples from the puzzle statement."""
    cases = {'1122': 3, '1111': 4, '1234': 0, '91212129': 9}
    for sequence, expected in cases.items():
        assert sum_recurring_digits(sequence) == expected
if __name__ == '__main__':
    # Accept the digit sequence either as the single CLI argument or on stdin.
    if len(sys.argv) == 2:
        sequence = sys.argv[1]
    else:
        sequence = sys.stdin.read().strip()
        if not sequence:
            print("Usage: python counter.py < <input>")
            exit(1)
    # The literal argument 'test' runs the self-checks instead of summing.
    if sequence == 'test':
        print('Running tests')
        test()
    else:
        result = sum_recurring_digits(sequence)
        print('Result: {}'.format(result))
| true |
a13fbbbc4c6dabacd5c510ac18c3be1c8210fae4 | Python | WeaselE/WebScraping | /WebScraperPractice.py | UTF-8 | 1,421 | 2.609375 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
# Scrape the fake-jobs demo site and print title / company / location / apply
# link for every card whose <h2> title mentions "engineer".
url = 'https://realpython.github.io/fake-jobs/'

r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
results = soup.find(id='ResultsContainer')

# Keep only the job cards whose title contains "engineer" (case-insensitive),
# then climb back up from the <h2> to the enclosing card element.
# (The original also ran a first pass over every card that computed the same
# elements but printed nothing — dead work, removed along with the unused
# `param`/`job` variables and commented-out debug code.)
python_jobs = results.find_all('h2', string=lambda text: 'engineer' in text.lower())
python_job_elements = [h2_elements.parent.parent.parent for h2_elements in python_jobs]

for job_element in python_job_elements:
    title_element = job_element.find('h2', class_='title is-5')
    company_element = job_element.find('h3', class_='subtitle is-6 company')
    location_element = job_element.find('p', class_='location')
    # The second <a> on each card is the "Apply" link.
    links = job_element.find_all('a')[1]['href']
    print(title_element.text.strip())
    print(company_element.text.strip())
    print(location_element.text.strip())
    print(links)
    print()
| true |
0ad4828d3189086a1a02066b04e05f423e0b5b8e | Python | bitflow-stream/python-bitflow | /bitflow/marshaller.py | UTF-8 | 6,126 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | import datetime
import struct
from bitflow.sample import Sample, Header
class BitflowProtocolError(Exception):
    """Raised when the Bitflow binary stream violates the wire protocol."""

    def __init__(self, description, expected=None, received=None):
        message = "Bitflow binary protocol error: {}.".format(description)
        # Append the optional expected/received details with identical wording.
        optional_parts = (
            (" Expected: {} (type {}).", expected),
            (" Received: {} (type {}).", received),
        )
        for template, value in optional_parts:
            if value is not None:
                message += template.format(value, type(value))
        super().__init__(message)
TIMESTAMP_NUM_BYTES = 8  # timestamps travel as big-endian unsigned 64-bit ints (UTC nanoseconds)
METRIC_NUM_BYTES = 8  # each metric value is a big-endian IEEE-754 double
HEADER_START = "timB"  # magic first header field identifying the binary format
TAGS_FIELD = "tags"  # mandatory second header field
SAMPLE_MARKER_BYTE = b'X'  # byte prefixing every sample record
SEPARATOR_BYTE = b'\n'  # terminates header fields and the tag string
TAGS_SEPARATOR = " "  # separates key=value pairs within the tag string
TAGS_EQ = "="  # separates key from value inside a single tag
class BinaryMarshaller:
    """Reads and writes Headers and Samples in the Bitflow binary wire format."""

    # ===============
    # General helpers
    # ===============

    def pack_long(self, value):
        """Pack an int as a big-endian unsigned 64-bit integer."""
        return struct.pack('>Q', value)

    def pack_double(self, value):
        """Pack a float as a big-endian IEEE-754 double."""
        return struct.pack('>d', value)

    def unpack_long(self, data):
        """Unpack a big-endian unsigned 64-bit integer."""
        return struct.unpack('>Q', data)[0]

    def unpack_double(self, data):
        """Unpack a big-endian IEEE-754 double."""
        return struct.unpack('>d', data)[0]

    def pack_string(self, string):
        """Encode a str to UTF-8 bytes."""
        return bytes(string, "UTF-8")

    def unpack_string(self, data):
        """Decode UTF-8 bytes to str."""
        return data.decode("UTF-8")

    # =======================================
    # Reading and parsing samples and headers
    # =======================================

    # Read either a Header or a Sample from the stream.
    def read(self, stream, previousHeader):
        """Read the next Header or Sample; wraps low-level parse failures
        in BitflowProtocolError."""
        try:
            return self._read(stream, previousHeader)
        except (struct.error, UnicodeDecodeError) as e:
            raise BitflowProtocolError("failed to parse data: {}".format(str(e)))

    def _read(self, stream, previousHeader):
        # Before any header has arrived, only a header is legal.
        if previousHeader is None:
            return self.read_header(stream)
        # Peek at the marker byte to decide sample vs. new header.
        start = stream.peek(len(SAMPLE_MARKER_BYTE))
        if len(start) == 0:
            return None  # Possible EOF
        elif len(start) >= len(SAMPLE_MARKER_BYTE) and start[:len(SAMPLE_MARKER_BYTE)] == SAMPLE_MARKER_BYTE:
            return self.read_sample(stream, previousHeader)
        else:
            return self.read_header(stream)

    def read_header(self, stream):
        """Read a Header: the 'timB' magic, the 'tags' field, then one metric
        name per line until an empty line."""
        timeField = self.read_line(stream)  # Header fields are terminated by newline characters
        if timeField == "":
            return None  # Possible EOF
        if timeField != HEADER_START:
            raise BitflowProtocolError("unexpected line", HEADER_START, timeField)
        tagsField = self.read_line(stream)
        if tagsField != TAGS_FIELD:
            raise BitflowProtocolError("unexpected line", TAGS_FIELD, tagsField)
        fields = []
        while True:
            fieldName = self.read_line(stream)
            if len(fieldName) == 0:
                break  # Empty line terminates the header
            fields.append(fieldName)
        return Header(fields)

    def read_sample(self, stream, header):
        """Read one Sample record: marker byte, 8-byte timestamp, tag line,
        then one 8-byte double per header field."""
        stream.read(len(SAMPLE_MARKER_BYTE))  # Result ignored, was already peeked
        num_fields = header.num_fields()
        timeBytes = stream.read(TIMESTAMP_NUM_BYTES)
        tagBytes = self.read_line(stream)  # New line terminates the tags
        valueBytes = stream.read(num_fields * METRIC_NUM_BYTES)
        timestamp = self.unpack_utc_nanos_timestamp(self.unpack_long(timeBytes))
        tags = self.parse_tags(tagBytes)
        metrics = []
        for index in range(num_fields):
            offset = index * METRIC_NUM_BYTES
            metricBytes = valueBytes[offset: offset + METRIC_NUM_BYTES]
            metric = self.unpack_double(metricBytes)
            metrics.append(metric)
        return Sample(header=header, metrics=metrics, timestamp=timestamp, tags=tags)

    def parse_tags(self, tags_string):
        """Parse a 'k1=v1 k2=v2' tag string into a dict; raises
        BitflowProtocolError for a tag without '='."""
        tags_dict = {}
        if tags_string == "":
            return tags_dict
        tags = tags_string.split(TAGS_SEPARATOR)
        for tag_string in tags:
            if TAGS_EQ in tag_string:
                key, value = tag_string.split(TAGS_EQ)
                tags_dict[key] = value
            else:
                raise BitflowProtocolError("illegal tag string", "key=value pair", tag_string)
        return tags_dict

    def read_line(self, stream):
        """Read one line and strip the trailing newline."""
        return self.unpack_string(stream.readline())[:-1]

    # ==========================================
    # Formatting and sending samples and headers
    # ==========================================

    def write_sample(self, stream, sample):
        """Write one Sample: marker, timestamp, tags line, metric doubles."""
        stream.write(SAMPLE_MARKER_BYTE)
        stream.write(self.pack_long(self.pack_utc_nanos_timestamp(sample)))
        stream.write(self.pack_string(self.format_tags(sample)))
        stream.write(SEPARATOR_BYTE)
        for val in sample.metrics:
            stream.write(self.pack_double(val))

    def write_header(self, stream, header):
        """Write a Header: magic, tags field, metric names, blank terminator."""
        for field in [HEADER_START, TAGS_FIELD] + header.metric_names:
            stream.write(self.pack_string(field))
            stream.write(SEPARATOR_BYTE)
        stream.write(SEPARATOR_BYTE)

    def format_tags(self, sample):
        """Format the sample's tags as a sorted 'k=v k=v' string.

        (Fix: removed an unused local accumulator `s` that was assigned but
        never read.)
        """
        pairs = ["{}={}".format(key, value) for key, value in sample.get_tags().items()]
        pairs.sort()
        return " ".join(pairs)

    # ==================
    # Timestamp handling
    # ==================
    # Note: Bitflow timestamps are represented in UTC, both in binary marshalled format, and internally.
    # Printing the timestamps as-is might result in a time that deviates from the local time.
    # Especially, UTC timetamps differ from what is printed by the Go-based bitflow-pipeline tool, which converts to local time.
    epoch = datetime.datetime.utcfromtimestamp(0)

    def unpack_utc_nanos_timestamp(self, timestamp):
        """Convert UTC nanoseconds-since-epoch to a naive UTC datetime
        (microsecond precision)."""
        seconds = timestamp // 1000000000
        micros = (timestamp // 1000) % 1000000
        return datetime.datetime.utcfromtimestamp(seconds) + datetime.timedelta(microseconds=micros)

    def pack_utc_nanos_timestamp(self, sample):
        """Convert the sample's datetime to UTC nanoseconds since epoch."""
        time = sample.get_timestamp()
        delta = time - self.epoch
        return int(delta.total_seconds() * 1000000000)  # Nanoseconds, rounded to microseconds
| true |
4394158213f5cb0b34c249c72a7e36d7eac6c80d | Python | saeschdivara/ArangoPy | /arangodb/tests/user.py | UTF-8 | 742 | 2.75 | 3 | [
"MIT"
] | permissive | from arangodb.tests.base import ExtendedTestCase
from arangodb.api import Database
from arangodb.user import User
class UserTestCase(ExtendedTestCase):
    """Integration tests for User CRUD against a scratch ArangoDB database."""

    def setUp(self):
        # Each test runs against its own throwaway database.
        self.database_name = 'testcase_user_123'
        self.db = Database.create(name=self.database_name)

    def tearDown(self):
        # Drop the scratch database created in setUp.
        Database.remove(name=self.database_name)

    def test_get_root(self):
        # The root account always exists on a fresh server.
        root_user = User.get(name='root')
        self.assertEqual(root_user.name, 'root')

    def test_create_and_delete_user_foo(self):
        new_name = 'foo'
        User.create(name=new_name, password='extra_key')
        fetched = User.get(name=new_name)
        self.assertEqual(fetched.name, new_name)
        User.remove(name=new_name)
| true |
d5b0c36ccc7ba5121e4d1aaa5cef83266003a5f6 | Python | SprintGhost/LeetCode | /697.数组的度.py | UTF-8 | 1,527 | 3.046875 | 3 | [
"Unlicense"
] | permissive | #
# @lc app=leetcode.cn id=697 lang=python3
#
# [697] 数组的度
#
# Accepted
# 89/89 cases passed (152 ms)
# Your runtime beats 95.78 % of python3 submissions
# Your memory usage beats 14.29 % of python3 submissions (15.4 MB)
# @lc code=start
class element:
    """Per-value bookkeeping for findShortestSubArray: first and last index a
    value was seen at, plus how many times it occurred so far."""

    def __init__(self, start_index, end_index):
        self.count = 0
        self.start_index = start_index
        self.end_index = end_index
class Solution:
    def findShortestSubArray(self, nums) -> int:
        """Return the length of the shortest contiguous subarray of nums whose
        degree (maximum element frequency) equals the degree of nums itself.

        Single O(n) pass recording each value's first index, last index and
        count; the answer is the tightest first..last span among the values
        that reach the maximum count.  This replaces the original bookkeeping,
        which recomputed candidate span lengths by list slicing on every tie —
        O(n^2) in the worst case — and crashed on empty input.
        """
        if not nums:
            return 0
        first_index = {}
        last_index = {}
        counts = {}
        for index, value in enumerate(nums):
            if value not in first_index:
                first_index[value] = index
            last_index[value] = index
            counts[value] = counts.get(value, 0) + 1
        degree = max(counts.values())
        # Shortest span among all values attaining the degree.
        return min(
            last_index[value] - first_index[value] + 1
            for value, count in counts.items()
            if count == degree
        )
# A = Solution()
# A.findShortestSubArray([1,2,2,1,2,1,1,1,1,2,2,2])
# @lc code=end
| true |
c74ea8a65c762aefdd0d51e0e6e5108e810f44a4 | Python | KATO-Hiro/AtCoder | /typical90/bd/main.py | UTF-8 | 1,056 | 3.21875 | 3 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | # -*- coding: utf-8 -*-
def main():
    # Typical 90 #56-style problem: for each of n rounds choose price a[i] or
    # b[i] so the total equals exactly s; print one valid choice string
    # ("A"/"B" per round) or "Impossible".
    import sys
    input = sys.stdin.readline

    n, s = map(int, input().split())
    a, b = [0] * n, [0] * n
    # dp[i][j] = True iff some choice over the first i rounds sums to exactly j.
    dp = [[False] * (s + 10) for _ in range(n + 10)]
    dp[0][0] = True

    for i in range(n):
        a[i], b[i] = map(int, input().split())

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            diff1 = j - a[i - 1]
            diff2 = j - b[i - 1]
            # Reach sum j at round i by picking a[i-1] or b[i-1].
            if diff1 >= 0 and dp[i - 1][diff1]:
                dp[i][j] = True
            if diff2 >= 0 and dp[i - 1][diff2]:
                dp[i][j] = True

    if dp[n][s]:
        # Reconstruct one valid assignment by walking the table backwards
        # from (n, s); choices are collected last-round-first.
        ans = ""
        pos = s
        for i in range(n, 0, -1):
            ai = a[i - 1]
            bi = b[i - 1]
            if pos >= ai and dp[i - 1][pos - ai]:
                ans += "A"
                pos -= ai
            elif pos >= bi and dp[i - 1][pos - bi]:
                ans += "B"
                pos -= bi
        # Reverse so the string reads round 1 .. round n.
        print(ans[::-1])
    else:
        print("Impossible")


if __name__ == "__main__":
    main()
| true |
8a25ec713001335804b83bf86b8add70c86d4e50 | Python | Mateusz-Grzelinski/logit-formula-generator | /logic_formula_generator/generators/contraint_solver/first_order_logic/cnf_constraint_solver.py | UTF-8 | 1,223 | 2.96875 | 3 | [] | no_license | import random
from abc import abstractmethod
from typing import Iterable, List, Dict
from logic_formula_generator.generators.utils._range import IntegerRange
class CNFConstraintSolver:
    """Abstract solver that enumerates clause-length distributions satisfying
    the given CNF size constraints."""

    def __init__(self, allowed_clause_lengths: List, number_of_clauses: IntegerRange, number_of_literals: IntegerRange):
        # NOTE(review): the parameter holds allowed clause lengths but is
        # stored as `literal_coefficients` — confirm which name is correct.
        self.literal_coefficients = allowed_clause_lengths
        self.number_of_literals = number_of_literals
        self.number_of_clauses = number_of_clauses

    @abstractmethod
    def solve(self) -> Iterable[Dict[int, int]]:
        """Yields solutions for given constraints
        :returns dict with solution - key is clause length, value is number of clauses
        """
        raise NotImplementedError

    def solve_in_random_order(self, skip_chance: float = None):
        """Yield solve()'s solutions in a randomised order.

        Each solution is emitted immediately with probability `skip_chance`;
        the remainder are buffered, shuffled and emitted afterwards.
        """
        # A missing skip_chance is drawn uniformly at random, then clamped to
        # <= 0.5 so at least half of the stream (in expectation) is deferred
        # into the shuffled tail.
        skip_chance = random.random() if skip_chance is None else skip_chance
        if skip_chance > 0.5:
            skip_chance = 0.5
        cache = []
        for solution in self.solve():
            if random.random() < skip_chance:
                yield solution
            else:
                cache.append(solution)
        random.shuffle(cache)
        for cached_solution in cache:
            yield cached_solution
| true |
d8b3f901ff13ccb30cc4802b94267ad1f9d210f2 | Python | Cosmo65/organizador | /file_organizer/date.py | UTF-8 | 1,242 | 3.125 | 3 | [] | no_license | import os
from datetime import date
class OrganizerByDate:
    """Walks a directory tree and moves every file into a subfolder of the
    target directory named after the file's modification date (ISO format)."""

    def __init__(self, current_dir: str = '', target_dir: str = './'):
        # An empty current_dir means "use the working directory at call time".
        # The original evaluated os.getcwd() in the signature, freezing the
        # directory at import time; os.path.abspath('') already resolves to
        # the current working directory, so '' is a safe sentinel.
        self._current_dir = os.path.abspath(current_dir or os.getcwd())
        self._target_dir = os.path.abspath(target_dir)

    def start(self):
        """
        Starts the discovery walk and dispatches each directory for processing.
        """
        for root, folders, file in os.walk(self._current_dir, topdown=True):
            self._process((root, folders, file))

    def _process(self, info: tuple):
        """
        Moves every file of one walk entry into its per-date target folder.

        :param info: one (root, folders, files) tuple from os.walk
        """
        root = info[0]
        files = info[2]
        for file in files:
            # os.path.join replaces the original hard-coded '\\' separators,
            # which broke on POSIX systems.
            file_path = os.path.join(root, file)
            folder_name = str(date.fromtimestamp(os.path.getmtime(file_path)))
            target_folder_path = os.path.join(self._target_dir, folder_name)
            if not os.path.exists(target_folder_path):
                os.mkdir(target_folder_path)
            self._move_file(file_path, os.path.join(target_folder_path, file))

    @staticmethod
    def _move_file(current_file_path: str, target_path: str):
        # rename is atomic within one filesystem; fails across devices.
        os.rename(current_file_path, target_path)
| true |
b60a20e847445facb1a7733e9442d5ea5c51f7f0 | Python | elsampsa/valkka-examples | /api_level_2/qt/demo_analyzer.py | UTF-8 | 5,665 | 3.046875 | 3 | [
"MIT"
] | permissive | """
analyzer.py : A base class for analyzing image streams using OpenCV and an example movement detector.
Copyright 2018 Sampsa Riikonen
Authors: Sampsa Riikonen
This file is part of the Valkka Python3 examples library
Valkka Python3 examples library is free software: you can redistribute it and/or modify it under the terms of the MIT License. This code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License for more details.
@file analyzer.py
@author Sampsa Riikonen
@date 2018
@version 1.5.2
@brief A base class for analyzing image streams using OpenCV and an example movement detector.
"""
import sys
import time
import cv2
import imutils
import numpy
from valkka.api2.tools import parameterInitCheck
class Analyzer(object):
    """A generic analyzer class
    """

    parameter_defs = {
        "verbose": (bool, False),  # :param verbose: verbose output or not? Default: False.
        "debug": (bool, False)  # :param debug: When this is True, will visualize on screen what the analyzer is doing (using OpenCV highgui)
    }

    def __init__(self, **kwargs):
        # Validates kwargs against parameter_defs and attaches each parameter
        # as an attribute on self; undefined_ok because subclasses add keys.
        parameterInitCheck(Analyzer.parameter_defs, kwargs, self, undefined_ok=True)  # checks that kwargs is consistent with parameter_defs. Attaches parameters as attributes to self. This is a mother class: there might be more parameters not defined here from child classes
        self.pre = self.__class__.__name__ + " : "
        # self.reset() # do this in child classes only ..

    def reset(self):
        """If the analyzer has an internal state, reset it
        """
        pass

    def __call__(self, img):
        """Do the magic for image img. Shape of the image array is (i,j,colors)
        """
        pass

    def report(self, *args):
        # Prints only when the analyzer was constructed with verbose=True.
        if (self.verbose):
            print(self.pre, *args)
        # pass
class MovementDetector(Analyzer):
    """A demo movement detector, written using OpenCV
    """
    # return values:
    state_same = 0  # no state change
    state_start = 1  # movement started
    state_stop = 2  # movement stopped

    parameter_defs = {
        "verbose": (bool, False),  # :param verbose: Verbose output or not? Default: False.
        "debug": (bool, False),  # :param debug: When this is True, will visualize on screen what the analyzer is doing. Uses OpenCV highgui. WARNING: this will not work with multithreading/processing.
        "deadtime": (int, 3),  # :param deadtime: Movement inside this time interval belong to the same event
        "treshold": (float, 0.001)  # :param treshold: How much movement is an event (area of the image place)
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        parameterInitCheck(MovementDetector.parameter_defs, kwargs, self)  # checks that kwargs is consistent with parameter_defs. Attaches parameters as attributes to self
        self.pre = self.__class__.__name__ + " : "
        self.reset()

    def reset(self):
        # Forget the previous frame and any in-progress movement event.
        self.prevframe = None
        self.wasmoving = False
        self.t0 = 0

    def __call__(self, img):
        # self.report("got frame :",img)
        # Downscale + blur so frame differencing is less sensitive to noise.
        modframe = imutils.resize(img, width=500)
        if (self.debug): cv2.imshow("SimpleMovementDetector_channels-modframe", modframe)
        modframe = cv2.GaussianBlur(modframe, (21, 21), 0)
        # Clunky-but-kept None check for the stored previous frame.
        if (self.prevframe.__class__ == None.__class__):  # first frame
            self.prevframe = modframe.copy()
            self.report("First image found!")
            result = self.state_same
        else:  # second or n:th frame
            # max(2) collapses the colour channels before differencing.
            delta = cv2.absdiff(self.prevframe.max(2), modframe.max(2))
            if (self.debug): cv2.imshow("SimpleMovementDetector_channels-delta0", delta)
            # Binarize: pixels whose difference exceeds 100 become 1.
            delta = cv2.threshold(delta, 100, 1, cv2.THRESH_BINARY)[1]  # TODO: how much treshold here..?
            # Fraction of the frame area that changed.
            val = delta.sum() / (delta.shape[0] * delta.shape[1])
            # print(self.pre,"MovementDetector: val=",val)
            self.prevframe = modframe.copy()

            if (val >= self.treshold):  # one promille ok .. there is movement
                self.t0 = time.time()
                self.report("==>MOVEMENT!")
                if (self.wasmoving):
                    result = self.state_same
                else:
                    # A fresh movement event begins here.
                    self.t0_event = self.t0
                    self.wasmoving = True
                    self.report("==> NEW MOVEMENT EVENT!")
                    result = self.state_start
            else:  # no movement
                dt = time.time() - self.t0  # how much time since the last movement event
                if (dt >= self.deadtime and self.wasmoving):  # lets close this event ..
                    dt_event = time.time() - self.t0_event
                    self.wasmoving = False
                    result = self.state_stop
                    self.report("==> MOVEMENT STOPPED!")
                else:
                    result = self.state_same

            if (self.debug): cv2.imshow("SimpleMovementDetector_channels-delta", delta * 255)

        if (self.debug):
            # cv2.waitKey(40*25) # 25 fps
            # cv2.waitKey(self.frametime)
            cv2.waitKey(1)
        return result
def test1():
    """Dummy-testing the movement analyzer
    """
    analyzer = MovementDetector(verbose=True, debug=True)
    # Two black frames, then a bright frame to trigger a movement event.
    frames = (
        numpy.zeros((1080 // 4, 1920 // 4, 3)),
        numpy.zeros((1080 // 4, 1920 // 4, 3)),
        numpy.ones((1080 // 4, 1920 // 4, 3)) * 100,
    )
    for frame in frames:
        result = analyzer(frame)
        print("\nresult =", result, "\n")
def test2():
    """TODO: demo here the OpenCV highgui with valkka
    """
    # Placeholder: intentionally does nothing yet.
    pass
def main():
    pre = "main :"
    print(pre, "main: arguments: ", sys.argv)
    if (len(sys.argv) < 2):
        print(pre, "main: needs test number")
    else:
        # SECURITY NOTE(review): builds and exec()s "testN()" straight from
        # argv — acceptable for a demo harness, never for untrusted input.
        st = "test" + str(sys.argv[1]) + "()"
        exec(st)


if (__name__ == "__main__"):
    main()
| true |
639f0a46ca3f2f3711f5ba0dfe670365537b98a7 | Python | caoliang/ISSM-CA3 | /py_src/signal_functions.py | UTF-8 | 12,055 | 2.640625 | 3 | [] | no_license | '''
def pltDistances(dists, title, xlab="X", ylab="Y", clrmap="viridis"):
#imgplt = plt.figure(figsize=(4, 4))
plt.suptitle(title, fontsize=20)
plt.imshow(dists, interpolation='nearest', cmap=clrmap)
plt.gca().invert_yaxis()
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.grid()
plt.colorbar()
return imgplt
def plt_cost_and_path(acuCost, path, title, xlab="X", ylab="Y", clrmap="viridis"):
px = [pt[0] for pt in path]
py = [pt[1] for pt in path]
imgplt = pltDistances(acuCost,
title,
xlab=xlab,
ylab=ylab,
clrmap=clrmap)
plt.plot(px, py)
return imgplt
'''
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Input
from keras import regularizers
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from numpy.random import seed
from tensorflow import set_random_seed
def draw_time_series(ts_data_x, ts_data_y, index_start, length):
    """Plot `length` boards starting at `index_start`: four chip-position
    traces plus the oven-temperature trace for each board.

    ts_data_x: 3-D array (board, channel, time); the last channel row is
    treated as the oven temperature — TODO confirm against the data loader.
    ts_data_y: per-board identifiers used in the figure titles.
    """
    plt.figure()
    oven_cols_index = ts_data_x.shape[1] - 1  # last channel = oven temperature
    n_parts = 5
    for i in range(index_start, index_start + length):
        fig, axes = plt.subplots(1, n_parts, figsize=(20, 5))
        data_title = 'Time Series [ID: {0}]'.format(ts_data_y[i])
        fig.suptitle(data_title, fontsize=12)
        for j in range(n_parts):
            if j == n_parts - 1:
                # Oven temperature
                data_series = ts_data_x[i, oven_cols_index:oven_cols_index + 1, :].flatten()
                axes[j].set_title('Oven Temperature')
            else:
                data_series = ts_data_x[i, j:j + 1, :].flatten()
                axes[j].set_title('({0},{1})'.format(i, j))
            axes[j].get_xaxis().set_visible(False)
            axes[j].plot(data_series)
        plt.show()
from scipy import sparse
from scipy.sparse.linalg import spsolve
from scipy.signal import find_peaks as findPeaks
def alsbase(y, lam, p, niter=10):
    """Asymmetric least squares baseline estimate.

    lam controls smoothness, p the asymmetry of the reweighting, niter the
    number of reweighting iterations.  Returns the estimated baseline.
    """
    n = len(y)
    second_diff = sparse.diags([1, -2, 1], [0, -1, -2], shape=(n, n - 2))
    # The smoothing operator does not change across iterations.
    smoother = lam * second_diff.dot(second_diff.transpose())
    weights = np.ones(n)
    for _ in range(niter):
        weight_mat = sparse.spdiags(weights, 0, n, n)
        baseline = spsolve(weight_mat + smoother, weights * y)
        # Points above the baseline get weight p, points below 1 - p.
        weights = p * (y > baseline) + (1 - p) * (y < baseline)
    return baseline
def corr_baseline(y):
    """Return the baseline-corrected signal: y minus its ALS baseline.

    Fix: the smoothness parameter was written as `10 ^ 5`, which in Python is
    bitwise XOR (= 15), not ten to the fifth; the ALS lambda clearly intended
    here is 10 ** 5.
    """
    y_base = alsbase(y, 10 ** 5, 0.000005, niter=50)
    corr_y = y - y_base
    return corr_y
def find_series_peaks(ts_data_x, ts_data_y, index_start, length, prominance, distance):
    """For each board, plot the raw traces (top row) and the baseline-corrected
    traces with detected peaks and troughs marked (bottom row).

    prominance/distance are forwarded to scipy.signal.find_peaks.
    """
    oven_cols_index = ts_data_x.shape[1] - 1  # last channel = oven temperature
    n_parts = 5
    for i in range(index_start, index_start + length):
        plt.figure()
        fig, axes = plt.subplots(2, n_parts, figsize=(20, 6))
        data_title = 'Time Series [ID: {0}]'.format(ts_data_y[i])
        fig.suptitle(data_title, fontsize=20)
        # Top row: raw series.
        for j in range(n_parts):
            if j == n_parts - 1:
                # Oven temperature
                data_series = ts_data_x[i, oven_cols_index:oven_cols_index + 1, :].flatten()
                axes[0, j].set_title('Oven Temperature')
            else:
                data_series = ts_data_x[i, j:j + 1, :].flatten()
                axes[0, j].set_title('({0},{1})'.format(i, j))
            axes[0, j].get_xaxis().set_visible(False)
            axes[0, j].plot(data_series)
        # Bottom row: baseline-corrected series with peaks/troughs marked.
        for j in range(n_parts):
            if j == n_parts - 1:
                # Oven temperature
                data_series = ts_data_x[i, oven_cols_index:oven_cols_index + 1, :].flatten()
                axes[1, j].set_title('Oven Temperature')
            else:
                data_series = ts_data_x[i, j:j + 1, :].flatten()
                axes[1, j].set_title('({0},{1})'.format(i, j))
            axes[1, j].get_xaxis().set_visible(False)
            corr_n = corr_baseline(data_series)
            # Locate peaks
            (pks_n, _) = findPeaks(corr_n, prominence=prominance, distance=distance)
            axes[1, j].plot(corr_n)
            axes[1, j].plot(pks_n, corr_n[pks_n], 'x')
            # Locate trough (peaks of the negated signal)
            (pks_n, _) = findPeaks(corr_n * (-1), prominence=prominance, distance=distance)
            # axes[j].plot(data_series)
            axes[1, j].plot(pks_n, corr_n[pks_n], 'x')
        plt.show()
def init_distance(x_series, y_series):
    """Pairwise squared-difference matrix for DTW:
    dists[i, j] = (y_series[i] - x_series[j]) ** 2."""
    dists = np.zeros((len(y_series), len(x_series)))
    for i, y_val in enumerate(y_series):
        for j, x_val in enumerate(x_series):
            dists[i, j] = (y_val - x_val) ** 2
    return dists
def compute_acu_cost(dists):
    """Accumulated DTW cost matrix: each cell is its own distance plus the
    cheapest of the three predecessor cells (diagonal, above, left)."""
    n_rows, n_cols = dists.shape
    acu = np.zeros(dists.shape)
    acu[0, 0] = dists[0, 0]
    # First row and column accumulate along a single direction.
    for j in range(1, n_cols):
        acu[0, j] = dists[0, j] + acu[0, j - 1]
    for i in range(1, n_rows):
        acu[i, 0] = dists[i, 0] + acu[i - 1, 0]
    for i in range(1, n_rows):
        for j in range(1, n_cols):
            acu[i, j] = dists[i, j] + min(acu[i - 1, j - 1],
                                          acu[i - 1, j],
                                          acu[i, j - 1])
    return acu
def compute_dtw_path(x_series, y_series, dists, acuCost):
    """Backtrack through the accumulated-cost matrix to recover the DTW
    alignment path from (len(x)-1, len(y)-1) down to (0, 0).

    Returns a list of [x_index, y_index] pairs in end-to-start order.
    `dists` is unused here; it is kept for signature symmetry with the
    other DTW helpers.
    """
    i = len(y_series) - 1
    j = len(x_series) - 1
    path = [[j, i]]
    while (i > 0) and (j > 0):
        # NOTE(review): these two branches are unreachable — the loop
        # condition already requires i > 0 and j > 0.
        if i == 0:
            j = j - 1
        elif j == 0:
            i = i - 1
        else:
            # Step to whichever neighbour produced this cell's cost,
            # preferring vertical, then horizontal, then diagonal on ties.
            if acuCost[i - 1, j] == min(acuCost[i - 1, j - 1],
                                        acuCost[i - 1, j],
                                        acuCost[i, j - 1]):
                i = i - 1
            elif acuCost[i, j - 1] == min(acuCost[i - 1, j - 1],
                                          acuCost[i - 1, j],
                                          acuCost[i, j - 1]):
                j = j - 1
            else:
                i = i - 1
                j = j - 1
        path.append([j, i])
    # NOTE(review): [0, 0] may already be the last appended pair, so the
    # path can end with a duplicate [0, 0] — confirm callers tolerate it.
    path.append([0, 0])
    return path
def plt_warp(s1, s2, path, title, xlab="idx", ylab="Value"):
    """Draw both series and a connector line for every aligned index pair in
    `path`.  Returns the pyplot module so callers can chain show()/savefig()."""
    # plot_fig = plt.figure(figsize=(4, 4))
    plt.title(title, fontsize=10)
    for [idx1, idx2] in path:
        # Connector between the aligned points s1[idx1] <-> s2[idx2].
        plt.plot([idx1, idx2], [s1[idx1], s2[idx2]],
                 color="C4",
                 linewidth=2)
    plt.plot(s1,
             'o-',
             color="C0",
             markersize=3)
    plt.plot(s2,
             's-',
             color="C1",
             markersize=2)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    return plt
def perform_dtw(x_series, y_series, title):
    """Full DTW pipeline: pairwise distances -> accumulated cost -> optimal
    path, then draw the warped alignment between the two series."""
    pair_dists = init_distance(x_series, y_series)
    acu_cost = compute_acu_cost(pair_dists)
    dtw_path = compute_dtw_path(x_series, y_series, pair_dists, acu_cost)
    plt_warp(x_series, y_series, dtw_path, title, xlab="", ylab="")
# Perform DTW on OvenTemp and Chip TimeSeries
def perform_dtw_ts(ts_data_x, ts_data_y, ts_data_index_tup, chip_pos_index_tup):
    """DTW-align two chip time series identified by (board index, chip position)."""
    board_a, board_b = ts_data_index_tup
    chip_a, chip_b = chip_pos_index_tup
    series_a = ts_data_x[board_a, chip_a, :].flatten()
    series_b = ts_data_x[board_b, chip_b, :].flatten()
    dtw_title = '{0}-{1},{2}-{3}'.format(ts_data_y[board_a], chip_a,
                                         ts_data_y[board_b], chip_b)
    perform_dtw(series_a, series_b, dtw_title)
def perform_dtw_ts_correction(ts_data_x, ts_data_y, ts_data_index_tup, chip_pos_index_tup):
    """Same as perform_dtw_ts, but baseline-correct both series first."""
    board_a, board_b = ts_data_index_tup
    chip_a, chip_b = chip_pos_index_tup
    corrected_a = corr_baseline(ts_data_x[board_a, chip_a, :].flatten())
    corrected_b = corr_baseline(ts_data_x[board_b, chip_b, :].flatten())
    dtw_title = '{0}-{1},{2}-{3} (Corr)'.format(ts_data_y[board_a], chip_a,
                                                ts_data_y[board_b], chip_b)
    perform_dtw(corrected_a, corrected_b, dtw_title)
def perform_dtw_comparison_by_chip_pos(ts_data_x, ts_data_y, ts_data_index_tup, chip_pos_index_list):
    """For one board pair, plot raw and baseline-corrected DTW alignments for
    every chip-position pair, up to three pairs (six subplots) per figure."""
    chip_pos_index_len = len(chip_pos_index_list)
    rows_plot = int(chip_pos_index_len / 3) + 1
    if chip_pos_index_len % 3 == 0:
        rows_plot -= 1
    data_index = 0
    while data_index < chip_pos_index_len:
        cols_plot = 6
        # Fewer than three pairs left: shrink the figure to 2 columns each.
        if chip_pos_index_len - data_index < 3:
            cols_plot = (chip_pos_index_len - data_index) * 2
        plt.figure(figsize=(20, 4))
        for i in range(0, cols_plot, 2):
            # Subplot index built as the digits "1<cols><pos>", e.g. 161;
            # valid because cols_plot never exceeds 6.
            subplot_num = int('{0}{1}{2}'.format(1, cols_plot, i + 1))
            axes = plt.subplot(subplot_num)
            perform_dtw_ts(ts_data_x, ts_data_y, ts_data_index_tup, chip_pos_index_list[data_index])
            axes.set_xticks([])
            axes.set_yticks([])
            subplot_num = int('{0}{1}{2}'.format(1, cols_plot, i + 2))
            axes = plt.subplot(subplot_num)
            perform_dtw_ts_correction(ts_data_x, ts_data_y, ts_data_index_tup, chip_pos_index_list[data_index])
            axes.set_xticks([])
            axes.set_yticks([])
            # Advance to the next chip-position pair for the next column pair.
            data_index += 1
        plt.show()
def perform_dtw_comparison_by_board_id(ts_data_x, ts_data_y, ts_data_index_list, chip_pos_index_list):
    """Run the chip-position DTW comparison for every board-index pair."""
    for board_pair in ts_data_index_list:
        perform_dtw_comparison_by_chip_pos(ts_data_x, ts_data_y, board_pair, chip_pos_index_list)
def plt_burn_in_board(ts_data_x, ts_data_y, ts_data_index_list, clrmap="viridis"):
    """Show each requested board as a 32x20 heat map of per-chip mean values,
    laid out in an up-to-8-column grid of subplots."""
    # Show board in x * 8 grid
    cols_num = 0
    rows_num = 0
    data_index_len = len(ts_data_index_list)
    total_rows = int(data_index_len / 8) + 1
    if data_index_len % 8 == 0:
        total_rows = total_rows - 1
    if total_rows == 0:
        total_rows = 1
    if total_rows == 1:
        fig, axes = plt.subplots(1, data_index_len, figsize=(20, 6))
    else:
        fig, axes = plt.subplots(total_rows, 8, figsize=(20, 6))
    index_num = 0
    for data_index in ts_data_index_list:
        # NOTE(review): oven_data_plt is computed but never used — confirm
        # whether alternating oven plots were planned here.
        oven_data_plt = index_num % 2 == 1
        # Pick the axes object; matplotlib returns a scalar, a 1-D or a 2-D
        # array depending on the subplot grid shape.
        if total_rows == 1:
            if data_index_len == 1:
                plt_axes = axes
            else:
                plt_axes = axes[cols_num]
        else:
            plt_axes = axes[rows_num, cols_num]
        # print('subplot_num: ({0}, {1})'.format(rows_num, cols_num))
        cols_num += 1
        if cols_num >= 8:
            cols_num = 0
            rows_num += 1
        # Exclude last oven temp data
        ts_chip_x = ts_data_x[data_index, 0:640, :].reshape(640, 20)
        # Mean over the time axis, reshaped into the 32x20 board layout.
        ts_chip_mean = np.mean(ts_chip_x, axis=1).reshape(32, 20)
        # ts_oven_x = ts_data_x[data_index, 640, :].reshape(1, 20)
        # ts_oven_mean = np.mean(ts_oven_x, axis=1)
        # ts_x = np.zeros((32, 21))
        # ts_x[:, :-1] = ts_chip_mean
        # ts_x[:, -1] = [ts_oven_mean] * 32
        title = 'ID {0}'.format(ts_data_y[data_index])
        # imgplt = plt.figure(figsize=(6, 6))
        plt_axes.set_title(title, fontsize=20)
        # plt.grid(b=True, which='major', axis='both', color='blue', linestyle='-', linewidth=1)
        plt_axes.imshow(ts_chip_mean, interpolation='nearest', cmap=clrmap)
        # plt_axes.set_xlabel(xlab)
        # plt_axes.set_ylabel(ylab)
        # Remove x and y axis ticks
        plt_axes.set_xticks([0, 5, 10, 15, 19])
        plt_axes.set_yticks([0, 5, 10, 15, 20, 25, 31])
        index_num += 1
    if total_rows > 1:
        # Delete the unused axes in the last, partially filled row.
        while cols_num < 8 and rows_num < total_rows:
            fig.delaxes(axes[rows_num][cols_num])
            cols_num += 1
| true |
6894fe4b94b03ed198aaa9d6fa082d150c18bba3 | Python | sauravgarg540/executors | /jinahub/encoders/text/SpacyTextEncoder/spacy_text_encoder.py | UTF-8 | 4,711 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List, Dict, Optional
import numpy as np
import torch
import spacy
from jina import Executor, DocumentArray, requests
from jina.logging.logger import JinaLogger
class SpacyTextEncoder(Executor):
    """
    :class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy

    :param lang: pre-trained spaCy language pipeline (model name HashEmbedCNN by default for tok2vec), `en_core_web_sm`
        by default. Allows models `en_core_web_md`, `en_core_web_lg`, `en_core_web_trf`. Refer https://spacy.io/models/en.
    :param use_default_encoder: if True will use parser component,
        otherwise tok2vec implementation will be chosen,
        by default False.
    :param default_traversal_paths: fallback traversal path in case there is not traversal path sent in the request;
        defaults to ['r'] when not given.
    :param device: device to use for encoding ['cuda', 'cpu] - if not set, the device is detected automatically
    :param args: Additional positional arguments.
    :param kwargs: Additional positional arguments.
    """
    SPACY_COMPONENTS = [
        'tagger',
        'parser',
        'ner',
        'senter',
        'tok2vec',
        'lemmatizer',
        'attribute_ruler',
    ]

    def __init__(self,
                 lang: str = 'en_core_web_sm',
                 use_default_encoder: bool = False,
                 default_traversal_paths: Optional[List[str]] = None,
                 device: Optional[str] = None,
                 *args, **kwargs):
        """Set constructor."""
        super().__init__(*args, **kwargs)
        self.lang = lang
        self.use_default_encoder = use_default_encoder
        # A None sentinel replaces the original mutable default argument
        # (['r']), which would have been shared across all instances.
        self.default_traversal_paths = (
            default_traversal_paths if default_traversal_paths is not None else ['r']
        )
        self.logger = JinaLogger(self.__class__.__name__)

        if not device:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        if self.device == 'cuda':
            spacy.require_gpu()

        try:
            self.spacy_model = spacy.load(self.lang)
            # Disable everything as we only requires certain pipelines to turned on.
            ignored_components = []
            for comp in self.SPACY_COMPONENTS:
                try:
                    self.spacy_model.disable_pipe(comp)
                except Exception:
                    ignored_components.append(comp)
            self.logger.info(f'Ignoring {ignored_components} pipelines as it does not available on the model package.')
        except IOError:
            self.logger.error(
                f'spaCy model for language {self.lang} can not be found. Please install by referring to the '
                'official page https://spacy.io/usage/models.'
            )
            raise

        if self.use_default_encoder:
            try:
                self.spacy_model.enable_pipe('parser')
            except ValueError:
                self.logger.error(
                    f'Parser for language {self.lang} can not be found. The default sentence encoder requires'
                    'DependencyParser to be trained. Please refer to https://spacy.io/api/tok2vec for more clarity.'
                )
                raise
        else:
            try:
                self.spacy_model.enable_pipe('tok2vec')
            except ValueError:
                self.logger.error(
                    f'TokenToVector is not available for language {self.lang}. Please refer to'
                    'https://github.com/explosion/spaCy/issues/6615 for training your own recognizer.'
                )
                raise

    @requests
    def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
        """
        Encode all docs with text and store the encodings in the embedding attribute of the docs.

        :param docs: documents sent to the encoder. The docs must have `text` as content
        :param parameters: dictionary to define the `traversal_path` and the `batch_size`.
            For example,
            `parameters={'traversal_paths': ['r']}`
            will set the parameters for traversal_paths that is actually used`
        """
        if docs:
            trav_paths = parameters.get('traversal_paths', self.default_traversal_paths)
            # traverse thought all documents which have to be processed
            flat_docs = docs.traverse_flat(trav_paths)
            # filter out documents without text
            filtered_docs = [doc for doc in flat_docs if doc.text is not None]
            for doc in filtered_docs:
                spacy_doc = self.spacy_model(doc.text)
                doc.embedding = spacy_doc.vector
| true |
906e1bea08173e63fdcde7bbc8cb540026bf5112 | Python | corylstewart/courseraAlgo | /Algorithms2/Week3/knapsack.py | UTF-8 | 2,714 | 3.265625 | 3 | [] | no_license | import sys
from operator import itemgetter
import time
# knap_rec recurses once per item; the default limit of 1000 frames is far
# too small for the large dataset.
sys.setrecursionlimit(99000)
def get_items(filename):
    """Parse a knapsack input file.

    The first line starts with the capacity; each following line holds a
    "value weight" pair.  Every returned item row is [value, weight, ratio]
    where ratio = value / weight.
    """
    with open(filename) as handle:
        capacity = int(handle.readline().split()[0])
        items = []
        for line in handle:
            record = [int(token) for token in line.split()]
            record.append(float(record[0]) / record[1])
            items.append(record)
    return capacity, items
def make_array(capacity, items):
return [[0 for y in range(len(items)+1)] for x in range(capacity+1)]
def make_narrow(capacity):
return [[0,0] for x in xrange(capacity+1)]
def remove_col(grid):
new_grid = list()
for row in grid:
new_grid.append([row[1], 0])
return new_grid
def print_grid(grid):
for row in grid:
print row
print ''
#test data
capacity = 6
items = [[3, 4], [2, 3], [4, 2], [4, 3], [1, 5]]
def knapsack(capacity, items):
grid = make_array(capacity, items)
i = 0
while items:
item = items.pop(0)
weight = item[1]
for x in xrange(capacity+1):
value = 0
if weight <= x:
grid[x][i+1] = max((grid[x][i], grid[x-weight][i] + item[0]))
else:
grid[x][i+1] = grid[x][i]
i += 1
return grid[-1][-1]
def knap_2m(capacity, items):
grid = make_narrow(capacity)
while items:
item = items.pop(0)
weight = item[1]
for x in xrange(capacity+1):
if weight <= x:
value = item[0]
grid[x][1] = max(grid[x][0], grid[x-weight][0] + value)
else:
grid[x][1] = grid[x][0]
grid = remove_col(grid)
return grid[-1][0]
cache = dict()
def knap_rec(capacity, items, i):
global cache
if i <= 0 or capacity <= 0:
return 0
key = (capacity, i)
if key in cache:
return cache[key]
weight = items[i][1]
left = knap_rec(capacity, items, i-1)
if weight <= capacity:
value = items[i][0]
bottom = knap_rec(capacity-weight, items, i-1) + value
new_weight = max(left, bottom)
cache[key] = new_weight
return new_weight
else:
cache[key] = left
return left
# nm size array
#capacity, items = get_items('knapsack1.txt')
#print knapsack(capacity, items) #2493893
# 2*m size array
#capacity, items = get_items('knapsack1.txt')
#print knap_2m(capacity, items) #2493893
# using caching and recusion
cache = dict()
#capacity, items = get_items('knapsack1.txt')
#print knap_rec(capacity, items, len(items)-1) #2493893
cache = dict()
capacity, items = get_items('knapsack_big.txt')
print knap_rec(capacity, items, len(items)-1)
| true |
87aebb60f713e45b4a359b39cdbc790a7c130f0a | Python | Ianssmith/data-structures | /origamiViz_project/origamiViz/protoviz.py | UTF-8 | 487 | 2.71875 | 3 | [] | no_license |
# coding: utf-8
# In[103]:
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import seaborn as sns
# In[104]:
df = pd.read_json("data/crane.json")
df.head()
# In[107]:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# In[108]:
ax.plot(df.x, df.y, df.z, color="r", linewidth=1, alpha=0.5)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
# In[ ]:
# In[ ]:
# In[ ]:
| true |
538c4c306786175ae7b33182e199fdac86ee100a | Python | markbirss/cardkb | /ascii_codes.py | UTF-8 | 4,123 | 2.703125 | 3 | [] | no_license | import uinput
ascii = {
# number row
0x1B: [uinput.KEY_ESC],
0x31: [uinput.KEY_1],
0x32: [uinput.KEY_2],
0x33: [uinput.KEY_3],
0x34: [uinput.KEY_4],
0x35: [uinput.KEY_5],
0x36: [uinput.KEY_6],
0x37: [uinput.KEY_7],
0x38: [uinput.KEY_8],
0x39: [uinput.KEY_9],
0x30: [uinput.KEY_0],
0x08: [uinput.KEY_BACKSPACE],
# top row
0x09: [uinput.KEY_TAB],
0x71: [uinput.KEY_Q],
0x77: [uinput.KEY_W],
0x65: [uinput.KEY_E],
0x72: [uinput.KEY_R],
0x74: [uinput.KEY_T],
0x79: [uinput.KEY_Y],
0x75: [uinput.KEY_U],
0x69: [uinput.KEY_I],
0x6F: [uinput.KEY_O],
0x70: [uinput.KEY_P],
# home row
0x61: [uinput.KEY_A],
0x73: [uinput.KEY_S],
0x64: [uinput.KEY_D],
0x66: [uinput.KEY_F],
0x67: [uinput.KEY_G],
0x68: [uinput.KEY_H],
0x6A: [uinput.KEY_J],
0x6B: [uinput.KEY_K],
0x6C: [uinput.KEY_L],
0x0D: [uinput.KEY_ENTER],
# bottom row
0x7A: [uinput.KEY_Z],
0x78: [uinput.KEY_X],
0x63: [uinput.KEY_C],
0x76: [uinput.KEY_V],
0x62: [uinput.KEY_B],
0x6E: [uinput.KEY_N],
0x6D: [uinput.KEY_M],
0x2C: [uinput.KEY_COMMA],
0x2E: [uinput.KEY_DOT],
0x20: [uinput.KEY_SPACE],
# arrow keys
0xB4: [uinput.KEY_LEFT],
0xB5: [uinput.KEY_UP],
0xB6: [uinput.KEY_DOWN],
0xB7: [uinput.KEY_RIGHT],
# number row symbols
0x21: [uinput.KEY_LEFTSHIFT, uinput.KEY_1],
0x40: [uinput.KEY_LEFTSHIFT, uinput.KEY_2],
0x23: [uinput.KEY_LEFTSHIFT, uinput.KEY_3],
0x24: [uinput.KEY_LEFTSHIFT, uinput.KEY_4],
0x25: [uinput.KEY_LEFTSHIFT, uinput.KEY_5],
0x5E: [uinput.KEY_LEFTSHIFT, uinput.KEY_6],
0x26: [uinput.KEY_LEFTSHIFT, uinput.KEY_7],
0x2A: [uinput.KEY_LEFTSHIFT, uinput.KEY_8],
0x28: [uinput.KEY_LEFTSHIFT, uinput.KEY_9],
0x29: [uinput.KEY_LEFTSHIFT, uinput.KEY_0],
0x7B: [uinput.KEY_LEFTSHIFT, uinput.KEY_LEFTBRACE],
0x7D: [uinput.KEY_LEFTSHIFT, uinput.KEY_RIGHTBRACE],
# top row symbols
0x5B: [uinput.KEY_LEFTBRACE],
0x5D: [uinput.KEY_RIGHTBRACE],
0x2f: [uinput.KEY_SLASH],
0x5C: [uinput.KEY_BACKSLASH],
0x7C: [uinput.KEY_LEFTSHIFT, uinput.KEY_BACKSLASH],
0x7E: [uinput.KEY_LEFTSHIFT, uinput.KEY_GRAVE],
0x27: [uinput.KEY_APOSTROPHE],
0x22: [uinput.KEY_LEFTSHIFT, uinput.KEY_APOSTROPHE],
# bottom row symbols
0x3B: [uinput.KEY_SEMICOLON],
0x3A: [uinput.KEY_LEFTSHIFT, uinput.KEY_SEMICOLON],
0x60: [uinput.KEY_GRAVE],
0x2B: [uinput.KEY_LEFTSHIFT, uinput.KEY_EQUAL],
0x2D: [uinput.KEY_MINUS],
0x5F: [uinput.KEY_LEFTSHIFT, uinput.KEY_MINUS],
0x3D: [uinput.KEY_EQUAL],
0x3F: [uinput.KEY_LEFTSHIFT, uinput.KEY_SLASH],
0x3C: [uinput.KEY_LEFTSHIFT, uinput.KEY_COMMA],
0x3E: [uinput.KEY_LEFTSHIFT, uinput.KEY_DOT],
# top row capitals
0x51: [uinput.KEY_LEFTSHIFT, uinput.KEY_Q],
0x57: [uinput.KEY_LEFTSHIFT, uinput.KEY_W],
0x45: [uinput.KEY_LEFTSHIFT, uinput.KEY_E],
0x52: [uinput.KEY_LEFTSHIFT, uinput.KEY_R],
0x54: [uinput.KEY_LEFTSHIFT, uinput.KEY_T],
0x59: [uinput.KEY_LEFTSHIFT, uinput.KEY_Y],
0x55: [uinput.KEY_LEFTSHIFT, uinput.KEY_U],
0x49: [uinput.KEY_LEFTSHIFT, uinput.KEY_I],
0x4F: [uinput.KEY_LEFTSHIFT, uinput.KEY_O],
0x50: [uinput.KEY_LEFTSHIFT, uinput.KEY_P],
# home row capitals
0x41: [uinput.KEY_LEFTSHIFT, uinput.KEY_A],
0x53: [uinput.KEY_LEFTSHIFT, uinput.KEY_S],
0x44: [uinput.KEY_LEFTSHIFT, uinput.KEY_D],
0x46: [uinput.KEY_LEFTSHIFT, uinput.KEY_F],
0x47: [uinput.KEY_LEFTSHIFT, uinput.KEY_G],
0x48: [uinput.KEY_LEFTSHIFT, uinput.KEY_H],
0x4A: [uinput.KEY_LEFTSHIFT, uinput.KEY_J],
0x4B: [uinput.KEY_LEFTSHIFT, uinput.KEY_K],
0x4C: [uinput.KEY_LEFTSHIFT, uinput.KEY_L],
# bottom row capitals
0x5A: [uinput.KEY_LEFTSHIFT, uinput.KEY_Z],
0x58: [uinput.KEY_LEFTSHIFT, uinput.KEY_X],
0x43: [uinput.KEY_LEFTSHIFT, uinput.KEY_C],
0x56: [uinput.KEY_LEFTSHIFT, uinput.KEY_V],
0x42: [uinput.KEY_LEFTSHIFT, uinput.KEY_B],
0x4E: [uinput.KEY_LEFTSHIFT, uinput.KEY_N],
0x4D: [uinput.KEY_LEFTSHIFT, uinput.KEY_M],
} | true |
abc507b58d50c45919d9c7aeddee37e10a2d17d7 | Python | tinproject/adventofcode2018 | /6/day.py | UTF-8 | 4,411 | 3.171875 | 3 | [] | no_license | from collections import Counter
from itertools import chain
from functools import partial
# import string
def get_coordinates(data):
clean_data = (l.strip() for l in data if l.strip())
coords = []
for d in clean_data:
x = int(d.split(",")[0].strip())
y = int(d.split(",")[1].strip())
coords.append((x, y))
return sorted(coords)
def get_bounding_box(coords):
min_x = min(coords, key=lambda i: i[0])[0]
max_x = max(coords, key=lambda i: i[0])[0]
min_y = min(coords, key=lambda i: i[1])[1]
max_y = max(coords, key=lambda i: i[1])[1]
return [(min_x, min_y), (max_x, max_y)]
def point_has_infinite_area(coords, point):
coords = list(coords)
x = point[0]
y = point[1]
points_in_border = (
next(chain(filter(lambda p: p[0] >= x and p[1] > y, coords)), False),
next(chain(filter(lambda p: p[0] < x and p[1] >= y, coords)), False),
next(chain(filter(lambda p: p[0] <= x and p[1] < y, coords)), False),
next(chain(filter(lambda p: p[0] > x and p[1] <= y, coords)), False),
)
return not all(points_in_border)
def distance(point1, point2):
return abs(point2[0] - point1[0]) + abs(point2[1] - point1[1])
def get_points_with_infinite_area(coords):
filter_infinite_area = partial(point_has_infinite_area, coords)
return list(filter(filter_infinite_area, coords))
def get_shortest_point(place, points):
distances = sorted(((distance(place, p), p) for p in points))
if distances[0][0] == distances[1][0]:
# Same distance to more than one point
return None
return distances[0][1]
def get_distance_matrix(coords, border):
# infinite_area_points = get_points_with_infinite_area(coords)
bounding_box = get_bounding_box(coords)
max_x = bounding_box[1][0] + border + 10
max_y = bounding_box[1][1] + border + 10
distance_matrix = dict()
for i in range(-border, max_x):
for j in range(-border, max_y):
place = (i, j)
distance_matrix[place] = get_shortest_point(place, coords)
# # Get map
# point_ids = {p: i for p, i in zip(coords, chain(string.ascii_letters, string.digits))}
# point_ids[None] = "·"
# dm = {k: point_ids[v] for k, v in sorted(distance_matrix.items())}
# for p in coords:
# dm[p] = "#"
# out = []
# for i in range(-border, max_x):
# line = []
# for j in range(-border, max_y):
# line.append(dm[(i, j)])
# out.append("".join(line))
# print(out)
return distance_matrix
def get_largest_area_point(coords):
dm1 = get_distance_matrix(coords, 0)
dm2 = get_distance_matrix(coords, 100)
c1 = Counter(dm1.values())
c2 = Counter(dm2.values())
non_expansive = [(p, d) for p, d in c1.items() if c2[p] == d]
max_area = sorted(non_expansive, key=lambda x: x[1], reverse=True)[0]
return max_area
def get_largest_area(coords):
point, size = get_largest_area_point(coords)
return size
def get_cumulative_distance(place, points):
cum_distance = sum((distance(place, p) for p in points))
return cum_distance
def get_region_size_with_less_distance(coords, max_cum_distance):
border = 0
bounding_box = get_bounding_box(coords)
max_x = bounding_box[1][0] + border + 1
max_y = bounding_box[1][1] + border + 1
distance_matrix = dict()
for i in range(-border, max_x):
for j in range(-border, max_y):
place = (i, j)
cum_distance = get_cumulative_distance(place, coords)
if cum_distance < max_cum_distance:
distance_matrix[place] = True
else:
distance_matrix[place] = False
bigger_region_size = Counter(distance_matrix.values())[True]
return bigger_region_size
def get_data():
with open('./input', 'rt') as f:
values = f.readlines()
return values
def solve():
data = get_data()
coords = get_coordinates(data)
largest_area_size = get_largest_area(coords)
# Part 1
print(f"Part1 - The largest non infinite area is: {largest_area_size}")
# Part 2
max_cum_distance = 10000
region_size = get_region_size_with_less_distance(coords, max_cum_distance)
print(f"Part2 - The biggest region size with cumulative distance less than {max_cum_distance} is: {region_size}")
if __name__ == "__main__":
solve()
| true |
3485d5567c328d32332831501cd6d9feaa158d8a | Python | vishnusak/DojoAssignments | /PylotMVC-NinjaGold/app/controllers/Ninja.py | UTF-8 | 1,899 | 2.671875 | 3 | [] | no_license | from system.core.controller import Controller, redirect, request, session
from random import randint
from time import strftime
from json import dumps
class Ninja(Controller):
def __init__(self, action):
super(Ninja, self).__init__(action)
def reset(self):
session.clear()
return redirect('/')
def index(self):
if 'score' not in session:
session['score'] = 0
if 'activities' not in session:
session['activities'] = []
return self.load_view('ninjagold.html')
def process(self, building):
activity = {}
if building == 'farm':
score = randint(10, 20)
activity['color'] = 'green'
activity['msg'] = 'Earned {} golds from the farm!'.format(score)
elif building == 'cave':
score = randint(5, 10)
activity['color'] = 'green'
activity['msg'] = 'Earned {} golds from the cave!'.format(score)
elif building == 'house':
score = randint(2, 5)
activity['color'] = 'green'
activity['msg'] = 'Earned {} golds from the house!'.format(score)
else:
score = randint(-50, 50)
if score > 0:
activity['color'] = 'green'
activity['msg'] = 'Yay! Won {} golds at the casino!'.format(score)
elif score == 0:
activity['color'] = 'black'
activity['msg'] = "Well! Atleast didn't lose anything at the casino!"
else:
activity['color'] = 'red'
activity['msg'] = "Entered the casino and lost {} golds... Ouch!".format(abs(score))
activity['time'] = strftime('%Y/%m/%d %I:%M %p')
session['score'] += score
activity['score'] = session['score']
session['activities'].insert(0, activity)
return dumps(activity)
| true |
365ca6563ecd2b67db6d81bf57fabded6d24161b | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_135/3977.py | UTF-8 | 819 | 2.796875 | 3 | [] | no_license | import os, sys
lines = tuple(open(sys.argv[1], 'r'))
testCasesCount = int(lines[0])
linesI = 1
while testCasesCount > 0:
guess1 = lines[linesI].rstrip()
guess1Rows = [lines[linesI+1].rstrip(),lines[linesI+2].rstrip(),lines[linesI+3].rstrip(),lines[linesI+4].rstrip()]
guess2 = lines[linesI+5].rstrip()
guess2Rows = [lines[linesI+6].rstrip(),lines[linesI+7].rstrip(),lines[linesI+8].rstrip(),lines[linesI+9].rstrip()]
testCasesCount -= 1
row1 = guess1Rows[int(guess1)-1].rsplit(" ")
row2 = guess2Rows[int(guess2)-1].rsplit(" ")
result = list(set(row1).intersection(row2))
case = str(linesI/10 + 1)
if len(result) == 1:
print "Case #"+case+": "+result[0]
elif len(result) > 1:
print "Case #"+case+": Bad magician!"
elif len(result) == 0:
print "Case #"+case+": Volunteer cheated!"
linesI += 10
| true |
8323753d4c8f5e6a7b6d67de7db386ee0f0ef1d6 | Python | PingchuanMa/Respect-Learning | /tools/plot_rewards.py | UTF-8 | 1,261 | 2.78125 | 3 | [] | no_license | from argparse import ArgumentParser
import os
import matplotlib.pyplot as plt
import numpy as np
import json
base_dir = os.path.dirname(os.path.abspath(__file__)) + '/../'
result_path = base_dir + 'results/'
def plot_rewards(reward_list, title, order=6):
x = np.arange(len(reward_list))
plt.figure('Training Result')
fit = np.polyfit(x, reward_list, order)
fit = np.polyval(fit, x)
plt.plot(x, reward_list, color='r', alpha=0.5)
plt.plot(x, fit, lw=2, label=title, color='r')
plt.xlabel('Iteration Number')
plt.ylabel('Episode Reward')
plt.grid(True)
save_path = result_path + 'figure/'
os.makedirs(save_path, exist_ok=True)
plt.savefig(save_path + title + ".png")
plt.close()
def main():
parser = ArgumentParser(description='Plot.')
parser.add_argument('--id', type=str)
parser.add_argument('--iter', type=str, default='final')
parser.add_argument('--order', type=int, default=6)
args = parser.parse_args()
reward_json_path = result_path + 'reward/' + args.id + '_' + args.iter + '.json'
with open(reward_json_path, 'r') as file:
reward_list = list(json.load(file))
plot_rewards(reward_list, args.id, args.order)
if __name__ == '__main__':
main()
| true |
8782a37e2f3ace34b10b812e16fb3e8a3fb87057 | Python | mariabg/rentalClassifier | /scripts/playWithData.py | UTF-8 | 953 | 2.90625 | 3 | [] | no_license | import sys
import pandas as pd
import numpy as np
def main():
df = pd.read_csv('15_03_2017_calendar.csv')
# ['listing_id' 'date' 'available' 'price']
print "calendar listing head", df.columns.values
# print df.head()
# print df["date"].max(), df["date"].min()
print
# print "\n\n\n"
# df = pd.read_csv('listings.csv')
# print "general listing head", df.columns.values
# print df.calendar_updated.head()
# print df.neighbourhood_cleansed.head()
# use lambda or the code bellow for this -> print df.loc[df.street.contains('Montferrutx')]
# df = pd.DataFrame({"body": ["ball", "red BALL", "round sphere"]})
# urba = df[df["street"].str.contains("Montferrutx")]
# print urba
# print df.loc[df.experiences_offered != 'none'].values
# mafioso = df.loc[df.calculated_host_listings_count == 752]
# print mafioso.host_url
# print df["calculated_host_listings_count"].max()
main()
| true |
41ce4d2c863f16d8ba2b3f53740a984cc26e7d73 | Python | githubfun/stockcat | /spider/stock/stock/spiders/qqusdaily.py | UTF-8 | 3,029 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python
#-*- coding: UTF-8 -*-
#author: fox
#desc: 抓取qq上每股的每日总览数据
#date: 2014/10/04
import sys, re, json, random
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from stock.items import StockDataItem
class QQUsDailySpider(BaseSpider):
name = "qqusdaily"
allowed_domains = ["qt.gtimg.cn", "qtimg.cn"]
code2id = dict()
start_urls = []
# 传入列表文件, 批量请求
def __init__(self, filename, request_count, day):
self.day = day
code_list = []
file = open(filename)
while True:
line = file.readline().strip("\r\n")
if not line:
break
fields = line.split()
#print fields, fields[1:]
self.code2id[fields[1][2:]] = int(fields[0])
code_list.append(fields[1])
file.close()
count = len(code_list)
offset = 0
while offset < count:
end = min(offset + int(request_count), count)
code_str = ",".join(code_list[offset:end])
offset = end
url = "http://qt.gtimg.cn/r=" + str(random.random()) + "q=" + code_str
print url
self.start_urls.append(url)
def parse(self, response):
body = response.body
lines = body.strip("\n").split(";")
for line in lines:
if len(line) == 0:
continue
parts = line.split("=")
#print line, parts
content = parts[1].strip('"')
#print content
fields = content.split("~")
#print fields
# 当日停牌则不能存入
open_price = float(fields[5])
close_price = float(fields[3])
if open_price == 0.0 or close_price == 0.0:
continue
item = StockDataItem()
show_code = fields[2]
rindex = show_code.rfind(".")
if -1 == rindex:
continue
item['code'] = stock_code = fields[2][0:rindex]
#print stock_code
item['sid'] = self.code2id[stock_code]
item['day'] = int(self.day)
item['last_close_price'] = fields[4]
item['open_price'] = open_price
item['high_price'] = fields[33]
item['low_price'] = fields[34]
item['close_price'] = close_price
item['vary_price'] = fields[31]
item['vary_portion'] = fields[32]
# 成交量转化为手
item['volume'] = int(fields[36])
# 美股缺少成交额的数据, 统一为0
item['amount'] = int(fields[37])
# 美股缺少换手率
item['exchange_portion'] = 0.0
# 计算振幅
item['swing'] = (float(item['high_price']) - float(item['low_price'])) / float(item['last_close_price']) * 100
#print item
yield item
| true |
5b01b955ae6bdd04335a73d87be562a6c75f88be | Python | iSaikyou/Praktikum_GUI | /Modul 3/Aritmatika.py | UTF-8 | 462 | 3.796875 | 4 | [] | no_license | class Aritmatika :
@staticmethod
def tambah(a,b) :
return a + b
@staticmethod
def kurang(a,b) :
return a - b
@staticmethod
def bagi(a,b) :
return a / b
@staticmethod
def bagi_int(a,b) :
return a // b
@staticmethod
def pangkat(a,b) :
return a ** b
# Langsung call class dan method
print (Aritmatika.tambah(5,5))
# Bikin object dlu
objekA = Aritmatika()
print (objekA.pangkat(2, 3)) | true |
24fae5b7c7a690ca302c7b5d199ca165594adca3 | Python | anderson-github-classroom/csc-369-student | /labs/Lab2.py | UTF-8 | 3,929 | 3.46875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,md,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.8.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Lab 2 - Creating an inverted index (again) and Word Counting
#
# Please review Lab 1 before proceeding. Part of this lab is creating an inverted index, but using Ray instead of Parallel. We'll then move onto the more complicated word counting example.
# + slideshow={"slide_type": "skip"}
# %load_ext autoreload
# %autoreload 2
# + slideshow={"slide_type": "skip"}
import Lab2_helper
# + slideshow={"slide_type": "skip"}
import ray
ray.init(ignore_reinit_error=True)
# + slideshow={"slide_type": "skip"}
display_available = True
try:
display('Verifying you can use display')
from IPython.display import Image
except:
display=print
display_available = False
try:
import pygraphviz
graphviz_installed = True # Set this to False if you don't have graphviz
except:
graphviz_installed = False
import os
from pathlib import Path
home = str(Path.home())
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
import sys
sys.path.insert(0,f'{home}/csc-369-student/tests')
import test_Lab2
# -
# ## Inverted Index
# + [markdown] slideshow={"slide_type": "subslide"}
# **Exercise 1:**
#
# You have already written most of what you need to use Ray to construct distributed inverted indices. Here I want you to modify Lab2_helper.py to use Ray and return the final inverted index. I'm supplying the code that divides your books into three sets.
# + slideshow={"slide_type": "subslide"}
import glob
def get_book_files(input_dir):
return glob.glob(f"{input_dir}/*.txt")
# + slideshow={"slide_type": "subslide"}
group1 = get_book_files(f"{home}/csc-369-student/data/gutenberg/group1")
group2 = get_book_files(f"{home}/csc-369-student/data/gutenberg/group2")
group3 = get_book_files(f"{home}/csc-369-student/data/gutenberg/group3")
# + slideshow={"slide_type": "subslide"}
index = Lab2_helper.merge([group1,group2,group3])
# -
index['Education']
# + slideshow={"slide_type": "subslide"}
index['Education']
# -
# clean up memory
index = None
import gc
gc.collect()
# ## Word Counting
# Now consider a different problem of common interest. Suppose we have a large corpus (fancy word common in natural language processing) and we want to calculate the number of times a word appears. We could try to hack our inverted index, but let's insert the requirement that this must be a clean implementation. In other words, I'll be manually reviewing your design and making you redo the assignment if it isn't "clean".
# + [markdown] slideshow={"slide_type": "subslide"}
# **Exercise 2:**
#
# Write a function that counts the words in a book. Output format shown below. You do not have to worry about punctuation and capitalization. In other words, please stick to simple f.readlines() and line.split(" "). Do not strip anything out.
# -
counts = Lab2_helper.count_words(group1[0])
counts
# + [markdown] slideshow={"slide_type": "subslide"}
# **Exercise 3**
#
# Now let's distribute this using Ray. Please implement a function that parallelizes the word counting and subsequent merges.
# -
merged_counts = Lab2_helper.merge_count_words([group1,group2,group3])
merged_counts['things']
# + slideshow={"slide_type": "skip"}
# Don't forget to push!
# -
| true |
a1a0ee1b304a7a2b58d79b924f27520febff3f50 | Python | snprpc/R_Interpreter | /snprpc/grammar/test_parserstruct.py | UTF-8 | 7,739 | 2.921875 | 3 | [] | no_license | from snprpc.grammar.ParserStruct import *
from snprpc.grammar.R_Parser import *
from snprpc.grammar.Parser import *
from snprpc.grammar.Statement import *
# 单元测试——测试简单的语法匹配器1.0
# 测试 Concat 类
# 定义关键子的tag值 ‘RESERVED’
# 通过 Contat 定义简单的加法文法匹配器 parser1
# 通过运算符重载定义简单的加法文法匹配器 parser2
# 模拟词法分析器的输出 tokens
# 构建抽象语法树 ast
def unit_addGrammar():
RESERVED = 'RESERVED'
parser1 = Concat(Concat(Tag('INT'), Reserved('+', 'RESERVED')), Tag('INT'))
parser2 = Tag('INT') + Reserved('+', RESERVED) + Tag('INT')
tokens = [
['1', 'INT'],
['+', 'RESERVED'],
['4', 'INT']
]
ast = parser1(tokens, 0)
ast_result = ast.value
print(ast_result)
# 单元测试——分句器1.0
# 通过定义 ‘;’ 文法匹配器对程序进行断句
# 通过完成的简单的算数运算文法分析器对输入 tokens 进行文法分析
# 构建抽象语法树 ast
def unit_Aesp():
tokens = [['a', 'VARIABLE'],
['=', 'RESERVED'],
['1', 'INTEGER'],
[';', 'RESERVED'],
['b', 'VARIABLE'],
['=', 'RESERVED'],
['a', 'VARIABLE'],
['+', 'RESERVED'],
['1', 'INTEGER'],
[';', 'RESERVED']]
ast = parse_result = R_parse(tokens)
# 单元测试 优先级测试1.0
def unit_precdence():
tokens = [
['a', 'VARIABLE'], ['=', 'RESERVED'], ['1', 'INTEGER'], [';', 'RESERVED'],
['b', 'VARIABLE'], ['=', 'RESERVED'], ['1', 'INTEGER'], ['+', 'RESERVED'], ['a', 'VARIABLE'], ['*', 'RESERVED'], ['3', 'INTEGER'], [';', 'RESERVED']
]
ast = R_parse(tokens)
# 单元测试 条件判断语句
def unit_if_stmt():
tokens=[['a', 'VARIABLE'], ['=', 'RESERVED'], ['1', 'INTEGER'], [';', 'RESERVED'],
['b', 'VARIABLE'], ['=', 'RESERVED'], ['2', 'INTEGER'], [';', 'RESERVED'],
['if', 'RESERVED'], ['(', 'RESERVED'], ['a', 'VARIABLE'], ['>', 'RESERVED'], ['b', 'VARIABLE'], [')', 'RESERVED'],
['{', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['b', 'VARIABLE'], ['}', 'RESERVED'],
['else', 'RESERVED'], ['{', 'RESERVED'], ['b', 'VARIABLE'], ['=', 'RESERVED'], ['a', 'VARIABLE'], ['}', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 while循环语句
def unit_while_stmt():
tokens = [
['a', 'VARIABLE'], ['=', 'RESERVED'], ['5', 'INTEGER'], [';', 'RESERVED'],
['result', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], [';', 'RESERVED'],
['while', 'RESERVED'], ['(', 'RESERVED'], ['a', 'VARIABLE'], ['>=', 'RESERVED'], ['0', 'INTEGER'], [')', 'RESERVED'],
['{', 'RESERVED'], ['result', 'VARIABLE'], ['=', 'RESERVED'], ['result', 'VARIABLE'], ['+', 'RESERVED'], ['a', 'VARIABLE'], [';', 'RESERVED'],
['a', 'VARIABLE'], ['=', 'RESERVED'], ['a', 'VARIABLE'], ['-', 'RESERVED'], ['1', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 函数申明
def unit_func_claim():
tokens = [['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], [';', 'RESERVED'],
['isodd', 'VARIABLE'], ['<-function', 'RESERVED'], ['(', 'RESERVED'], ['num', 'VARIABLE'], [')', 'RESERVED'],
['{', 'RESERVED'], ['if', 'RESERVED'], ['(', 'RESERVED'], ['num', 'VARIABLE'], ['%%', 'RESERVED'], ['2', 'INTEGER'], ['!=', 'RESERVED'], ['0', 'INTEGER'], [')', 'RESERVED'], ['{', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['1', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 for循环
def unit_for_stmt():
tokens = [['a', 'VARIABLE'], ['=', 'RESERVED'], ['abcde', 'STRING'], [';', 'RESERVED'],
['b', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], [';', 'RESERVED'],
['for', 'RESERVED'], ['(', 'RESERVED'], ['i', 'VARIABLE'], ['in', 'RESERVED'], ['a', 'VARIABLE'], [')', 'RESERVED'],
['{', 'RESERVED'], ['b', 'VARIABLE'], ['=', 'RESERVED'], ['b', 'VARIABLE'], ['+', 'RESERVED'], ['1', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 函数调用
def unit_func_call():
tokens = [['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], [';', 'RESERVED'],
['isodd', 'VARIABLE'], ['<-function', 'RESERVED'], ['(', 'RESERVED'], ['num', 'VARIABLE'], [')', 'RESERVED'],
['{', 'RESERVED'],
['if', 'RESERVED'], ['(', 'RESERVED'], ['num', 'VARIABLE'], ['%%', 'RESERVED'], ['2', 'INTEGER'], ['!=', 'RESERVED'], ['0', 'INTEGER'], [')', 'RESERVED'],
['{', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['1', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED'],
['isodd', 'VARIABLE'], ['(', 'RESERVED'], ['2', 'INTEGER'], [',', 'RESERVED'], ['num2', 'VARIABLE'], [')', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 return 语句
def unit_return_sys():
tokens = [['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], [';', 'RESERVED'],
['isodd', 'VARIABLE'], ['<-function', 'RESERVED'], ['(', 'RESERVED'], ['num1', 'VARIABLE'], [')', 'RESERVED'],
['{', 'RESERVED'], ['if', 'RESERVED'], ['(', 'RESERVED'], ['num', 'VARIABLE'], ['%%', 'RESERVED'], ['2', 'INTEGER'], ['!=', 'RESERVED'], ['0', 'INTEGER'], [')', 'RESERVED'],
['{', 'RESERVED'], ['return', 'RESERVED'], ['1', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED'],
['return', 'RESERVED'], ['0', 'INTEGER'], ['}', 'RESERVED'], [';', 'RESERVED'],
['isodd', 'VARIABLE'], ['(', 'RESERVED'], ['2', 'INTEGER'], [')', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 repeat循环
def unit_repeat_stmt():
tokens = [['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], [';', 'RESERVED'],
['repeat', 'RESERVED'], ['{', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['a', 'VARIABLE'], ['+', 'RESERVED'], ['1', 'INTEGER'], [';', 'RESERVED'],
['if', 'RESERVED'], ['(', 'RESERVED'], ['a', 'VARIABLE'], ['>', 'RESERVED'], ['5', 'INTEGER'], [')', 'RESERVED'],
['{', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], ['}', 'RESERVED'], ['}', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 break、next 关键字
# 单元测试 repeat循环
def unit_breakandnext_stmt():
tokens = [['a', 'VARIABLE'], ['=', 'RESERVED'], ['0', 'INTEGER'], [';', 'RESERVED'],
['repeat', 'RESERVED'], ['{', 'RESERVED'], ['a', 'VARIABLE'], ['=', 'RESERVED'], ['a', 'VARIABLE'], ['+', 'RESERVED'], ['1', 'INTEGER'], [';', 'RESERVED'],
['next', 'RESERVED'], [';', 'RESERVED'],
['if', 'RESERVED'], ['(', 'RESERVED'], ['a', 'VARIABLE'], ['>', 'RESERVED'], ['5', 'INTEGER'], [')', 'RESERVED'],
['{', 'RESERVED'], ['break', 'RESERVED'], ['}', 'RESERVED'], ['}', 'RESERVED'], [';', 'RESERVED']]
ast = R_parse(tokens)
return ast
# 单元测试 c函数声明
def unit_c_sys():
tokens = [['c_test', 'VARIABLE'], ['<-c', 'RESERVED'], ['(', 'RESERVED'], ['a', 'STRING'], [',', 'RESERVED'], ['b', 'STRING'], [',', 'RESERVED'], ['c', 'STRING'], [')', 'RESERVED']]
ast = R_parse(tokens)
return ast | true |
1d295d5c15722362e801763244abe6adf0b82948 | Python | nnocsupnn/python-webscraper | /src/components/RedisClient.py | UTF-8 | 738 | 2.71875 | 3 | [] | no_license | import redis
import sys
class RedisClient:
client = None
pubsub = None
isSub = False
host = '127.0.0.1'
password = 'Ccnkbq9V4KDVCyT5FfYpH7ZPhcvisYCf' # Ccnkbq9V4KDVCyT5FfYpH7ZPhcvisYCf
def __init__(self):
self.client = redis.Redis(host=self.host, port=6379, password=self.password)
def getClient(self):
return self.client
def subToKey(self, pattern):
self.pubsub = self.client.pubsub()
self.pubsub.subscribe(pattern)
self.isSub = True
return self
def getSubMessage(self):
message = self.pubsub.get_message()
return message
def publishValue(self, key, value):
self.client.publish(key, value)
def setValue(self, key, value):
self.client.set(key, value) | true |
27b75335f663f8315f11cee4d234084dc2b89b87 | Python | Chuckletowski/Personal-Projects | /04_shipping_fee_calculator.py | UTF-8 | 775 | 4.3125 | 4 | [] | no_license | # Calculate shipping charges for a shopper. Ask the user to enter the amount for their total purchase.
# If their total is under $50, add $10. Otherwise, shipping is free.
# Tell the user their final total including shipping costs and format the number so it looks like a monetary value.
# Don’t forget to test your solution with:
# A value > 50
# A value < 50
# A value of exactly 50
limit_amount = 50
shipping_fee = 10
total_amount = float(input('How much is your total shopping cost? $'))
if total_amount < limit_amount:
total_amount += shipping_fee
print('\n\nYour overall charge is $%.2f, an additional $%d has been charged for shipping fee' %(total_amount, shipping_fee))
else:
print('\n\nYour overall charge is $%.2f with free shipping' %total_amount)
| true |
140d6d256435c858e9cce74c2a109f0bb2d3ef77 | Python | prescottwhite/112-prog3 | /tli.py | UTF-8 | 9,398 | 3.328125 | 3 | [] | no_license | #! /usr/bin/env python3
import fileinput
import sys
# used to store a parsed TL expression, which is one of:
# a constant number, a constant string, a variable name, or a binary expression
# operators: num, str, var, +, -, *, /, ==, <, >, <=, >=, !=
class Expr:
    """One parsed TL expression.

    An expression is either a leaf -- a numeric constant ("num"), a string
    constant ("str"), or a variable reference ("var") -- or a binary
    operation on two operands.  For leaf forms op2 is None.
    """

    # Every operator tag the parser may produce.  Kept at class level so the
    # list is built once instead of on every construction.
    LEGAL_OPS = ["num", "str", "var", "+", "-", "*", "/",
                 "==", "<", ">", "<=", ">=", "!="]

    # Binary operators mapped to the function that evaluates them.
    # Arithmetic yields a float; comparisons yield TL's truth values 1 / 0
    # (matching the original if/elif chain's behavior exactly).
    _BINARY_OPS = {
        "+":  lambda x, y: x + y,
        "-":  lambda x, y: x - y,
        "*":  lambda x, y: x * y,
        "/":  lambda x, y: x / y,
        "==": lambda x, y: int(x == y),
        "<":  lambda x, y: int(x < y),
        ">":  lambda x, y: int(x > y),
        "<=": lambda x, y: int(x <= y),
        ">=": lambda x, y: int(x >= y),
        "!=": lambda x, y: int(x != y),
    }

    def __init__(self, lineNum, op1, operator, op2=None):
        # self.legalOps kept as an instance attribute for backward
        # compatibility; it now aliases the shared class-level list.
        self.legalOps = self.LEGAL_OPS
        self.op1 = op1
        self.operator = operator
        if self.operator not in self.legalOps:
            # error reporter defined elsewhere in this module
            syntaxError(lineNum)
        self.op2 = op2

    def __str__(self):
        if self.op2 is None:
            return self.op1
        else:
            return self.op1 + " " + self.operator + " " + self.op2

    # evaluate this expression given the environment of the symTable
    def eval(self, lineNum, symTable, labelTable):
        """Evaluate this expression in the environment of symTable.

        Returns a float for numeric results (comparisons return the ints
        1 or 0) and a str for string constants.  lineNum is used only for
        error reporting; labelTable is unused here but kept for a uniform
        call signature with statement execution.
        """
        if self.operator == "num":
            return float(self.op1)
        if self.operator == "str":
            return str(self.op1)
        if self.operator == "var":
            return self.findVar(lineNum, self.op1, symTable)
        # Binary expression: resolve both operands (left first, preserving
        # the original error-reporting order), then dispatch on the operator.
        x = self._operand(lineNum, self.op1, symTable)
        y = self._operand(lineNum, self.op2, symTable)
        return self._BINARY_OPS[self.operator](x, y)

    def _operand(self, lineNum, op, symTable):
        """Resolve one operand of a binary expression to a float.

        Numeric literals are converted directly; anything else is treated
        as a variable name and looked up in symTable.
        """
        # isNumber is defined elsewhere in this module
        if isNumber(op):
            return float(op)
        return self.findVar(lineNum, op, symTable)

    def findVar(self, lineNum, op, symTable):
        """Look up variable op in symTable as a float.

        Reports an undefined-variable error (via varError, defined
        elsewhere in this module) if the name is not bound.
        """
        try:
            return float(symTable[op])
        except KeyError:
            varError(op, lineNum)
# used to store a parsed TL statement
class Stmt:
    def __init__(self, lineNum, keyword, exprs):
        """A TL statement: a keyword plus its parsed operand expressions."""
        self.lineNum = lineNum
        self.keyword = keyword
        self.exprs = exprs

    def __str__(self):
        suffix = ""
        for expr in self.exprs:
            suffix += " " + str(expr)
        return self.keyword + suffix

    # perform/execute this statement given the environment of the symTable;
    # returns the index of the next statement to run
    def perform(self, index, symTable, labelTable):
        if self.keyword == "let":
            # bind the variable named by exprs[0] to the value of exprs[1]
            symTable[str(self.exprs[0])] = self.exprs[1].eval(self.lineNum, symTable, labelTable)
            return index + 1
        if self.keyword == "if":
            # zero condition: fall through to the following statement
            if self.exprs[0].eval(self.lineNum, symTable, labelTable) == 0:
                return index + 1
            # nonzero: jump to the statement index recorded for the label
            label = str(self.exprs[-1])
            try:
                return labelTable[label]
            except KeyError:
                gotoError(label, self.lineNum)
        if self.keyword == "print":
            # render each evaluated expression followed by a space
            rendered = ""
            for expr in self.exprs:
                rendered += str(expr.eval(self.lineNum, symTable, labelTable)) + " "
            print(rendered)
            return index + 1
        if self.keyword == "input":
            # read one numeric value from stdin into the target variable
            entered = input()
            if not isNumber(entered):
                inputError()  # exits the interpreter
            symTable[str(self.exprs[0])] = entered
            return index + 1
def parseFile(file, labelTable, stmtList):
    """Parse every line of the TL source into Stmt objects.

    Side effects: appends parsed statements to stmtList and records
    label -> statement-index mappings in labelTable.  Exits the process
    via syntaxError() on any malformed line.
    """
    lineNum = 0
    for line in file:
        lineNum += 1
        # tokenize on whitespace; quoted strings are re-stitched later for print
        lineParsed = line.split()
        exprList = []
        # if line is not empty
        if (len(lineParsed) != 0):
            # if there is a label
            if lineParsed[0].endswith(':'):
                # store label without colon
                # use index in stmtList directly
                labelTable[str(lineParsed[0][:-1])] = len(stmtList)
                lineParsed = lineParsed[1:]
            keyword = lineParsed[0]
            if keyword == "let":
                # NOTE(review): .remove() raises ValueError (an uncaught
                # traceback, not a clean syntaxError) if "let"/"=" is missing
                lineParsed.remove("let")
                lineParsed.remove("=")
                numTokens = len(lineParsed)
                # add variable name
                exprList.append(Expr(lineNum, lineParsed[0], "var"))
                # if form is 'let variable = value' where value is either float or string
                if (numTokens) == 2:
                    if isNumber(lineParsed[1]):
                        exprList.append(Expr(lineNum, lineParsed[1], "num"))
                    else:
                        exprList.append(Expr(lineNum, lineParsed[1], "var"))
                # if form is 'let variable = value operator value' where '=' is removed and value is either float or variable
                elif (numTokens) == 4:
                    exprList.append(Expr(lineNum, lineParsed[1], lineParsed[2], lineParsed[3]))
                else:
                    syntaxError(lineNum)
                stmtList.append(Stmt(lineNum, keyword, exprList))
            elif keyword == "if":
                # expected form: if <expr> goto <label>
                lineParsed.remove("if")
                lineParsed.remove("goto")
                numTokens = len(lineParsed)
                if (numTokens) == 2:
                    if isNumber(lineParsed[0]):
                        exprList.append(Expr(lineNum, lineParsed[0], "num"))
                    else:
                        exprList.append(Expr(lineNum, lineParsed[0], "var"))
                elif (numTokens) == 4:
                    exprList.append(Expr(lineNum, lineParsed[0], lineParsed[1], lineParsed[2]))
                else:
                    syntaxError(lineNum)
                # add label to end of statement, this is the label to goto if expression is true
                exprList.append(Expr(lineNum, lineParsed[-1], "str"))
                stmtList.append(Stmt(lineNum, keyword, exprList))
            elif keyword == "print":
                # stitch parsed line up to preserve quotes that were split in the middle
                line = " ".join(lineParsed)
                # remove 'print'
                line = line[5:]
                # split on comma
                lineParsed = line.split(',')
                # for each expression
                for x in lineParsed:
                    x = x.strip()
                    # a double-quoted token is a string literal (quotes stripped)
                    if x.startswith('"') and x.endswith('"'):
                        exprList.append(Expr(lineNum, x[1:-1], "str"))
                    else:
                        x = x.split()
                        if len(x) == 1:
                            if isNumber(x[0]):
                                exprList.append(Expr(lineNum, x[0], "num"))
                            else:
                                exprList.append(Expr(lineNum, x[0], "var"))
                        elif len(x) == 3:
                            exprList.append(Expr(lineNum, x[0], x[1], x[2]))
                        else:
                            syntaxError(lineNum)
                stmtList.append(Stmt(lineNum, keyword, exprList))
            elif keyword == "input":
                # expected form: input <variable>
                lineParsed.remove("input")
                if len(lineParsed) == 1:
                    exprList.append(Expr(lineNum, lineParsed[0], "var"))
                    stmtList.append(Stmt(lineNum, keyword, exprList))
                else:
                    syntaxError(lineNum)
            else:
                syntaxError(lineNum)
# found on StackOverflow
# https://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float?rq=1
def isNumber(s):
    """Return True when `s` parses as a float, otherwise False."""
    try:
        float(s)
    except ValueError:
        return False
    return True
# error methods
def syntaxError(lineNum):
    """Report a malformed statement and terminate the interpreter."""
    print("Syntax error on line %s." % lineNum)
    sys.exit()
def gotoError(label, lineNum):
    """Report a goto that targets an unknown label, then terminate."""
    print("Illegal goto %s at line %s." % (label, lineNum))
    sys.exit()
def varError(varName, lineNum):
    """Report use of an undefined variable, then terminate."""
    print("Undefined variable %s at line %s." % (varName, lineNum))
    sys.exit()
def inputError():
    """Report non-numeric or missing interactive input, then terminate."""
    print("Illegal or missing input")
    sys.exit()
def executeStmts(symTable, labelTable, stmtList):
    """Run the program; each statement returns the next statement index."""
    pc = 0
    while pc < len(stmtList):
        pc = stmtList[pc].perform(pc, symTable, labelTable)
def main():
    """Entry point: parse the TL program named on the command line, then run it."""
    # read 1st argument when calling script
    fileName = sys.argv[1]
    # symbol table, label table and the parsed statement list
    symTable = {}
    labelTable = {}
    stmtList = []
    # `with` guarantees the source file is closed even if parsing exits early
    # (the original opened the file and never closed it)
    with open(fileName, "r") as file:
        parseFile(file, labelTable, stmtList)
    executeStmts(symTable, labelTable, stmtList)


main()
cfc464a3239ab856d0c7d8660096b0026098601f | Python | hnz71211/Python-Basis | /com.lxh/learning/10_function_param/__init__.py | UTF-8 | 3,643 | 4.1875 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Default parameters
# Calling power(5) is equivalent to calling power(5, 2)
def power(x, n=2):
    # repeated multiplication: computes x ** n for integer n >= 0
    s = 1
    while n > 0:
        n = n - 1
        s = s * x
    return s
# Key rule when defining default parameters: the default value must point to
# an immutable object!
# Calling add_end() repeatedly gives a different result each time, because
# the shared default list keeps growing (this demonstrates the pitfall)
def add_end(L=[]):
    L.append('END')
    return L
# the safe version: default to None and create a fresh list on every call
def add_end2(L=None):
    if L is None:
        L = []
    L.append('END')
    return L
# Var-positional (variadic) parameters
def calc(*numbers):
    # sum of squares over all positional arguments
    sum = 0
    for n in numbers:
        sum = sum + n * n
    return sum
calc(1, 2)
calc()
# *nums unpacks every element of the list nums as positional arguments.
nums = [1, 2, 3]
calc(*nums)
# Keyword arguments: the function accepts zero or more named arguments,
# which are automatically collected into a dict inside the function
# kw holds the extra keyword arguments
def person(name, age, **kw):
    print('name:', name, 'age:', age, 'other:', kw)
person('Michael', 30) # name: Michael age: 30 other: {}
person('Bob', 35, city='Beijing') # name: Bob age: 35 other: {'city': 'Beijing'}
person('Adam', 45, gender='M', job='Engineer') # name: Adam age: 45 other: {'gender': 'M', 'job': 'Engineer'}
extra = {'city': 'Beijing', 'job': 'Engineer'}
person('Jack', 24, **extra) # name: Jack age: 24 other: {'city': 'Beijing', 'job': 'Engineer'}
# Keyword arguments: the caller may pass any number of unrestricted keyword arguments
def person(name, age, **kw):
    if 'city' in kw:
        # a 'city' argument was supplied
        pass
    if 'job' in kw:
        # a 'job' argument was supplied
        pass
    print('name:', name, 'age:', age, 'other:', kw)
# the caller passes arbitrary, unrestricted keyword arguments
person('Jack', 24, city='Beijing', addr='Chaoyang', zipcode=123456)
# Named keyword arguments restrict which keyword names are accepted --
# here only city and job are allowed
# Named keyword arguments require the special separator *; every parameter
# after * is a named keyword argument
def person(name, age, *, city, job):
    print(name, age, city, job)
person('Jack', 24, city='Beijing', job='Engineer')
# If the definition already contains a var-positional parameter, the named
# keyword arguments that follow no longer need the * separator
def person(name, age, *args, city, job):
    print(name, age, args, city, job)
# named keyword arguments may also carry default values
def person(name, age, *, city='Beijing', job):
    print(name, age, city, job)
# Combining parameters: required, default, var-positional, named-keyword and
# keyword parameters -- all 5 kinds can be combined in one signature.
# The definition order must be: required, default, var-positional,
# named-keyword, then keyword parameters
def f1(a, b, c=0, *args, **kw):
    print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)
def f2(a, b, c=0, *, d, **kw):
    print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)
f1(1, 2)
# a = 1 b = 2 c = 0 args = () kw = {}
f1(1, 2, c=3)
# a = 1 b = 2 c = 3 args = () kw = {}
f1(1, 2, 3, 'a', 'b')
# a = 1 b = 2 c = 3 args = ('a', 'b') kw = {}
f1(1, 2, 3, 'a', 'b', x=99)
# a = 1 b = 2 c = 3 args = ('a', 'b') kw = {'x': 99}
f2(1, 2, d=99, ext=None)
# a = 1 b = 2 c = 0 d = 99 kw = {'ext': None}
# Any function can be called in the form func(*args, **kw), no matter how
# its parameters are defined.
# Although up to 5 kinds of parameters can be combined, avoid using too many
# at once -- it makes the interface hard to understand.
args = (1, 2, 3, 4)
kw = {'d': 99, 'x': '#'}
f1(*args, **kw)
# a = 1 b = 2 c = 3 args = (4,) kw = {'d': 99, 'x': '#'}
args = (1, 2, 3)
kw = {'d': 88, 'x': '#'}
f2(*args, **kw)
# a = 1 b = 2 c = 3 d = 88 kw = {'x': '#'}
a808121de4e326de57c884e2d4509fff9d95e267 | Python | jbrownxf/mycode | /lab_input/input_ip.py | UTF-8 | 281 | 3.59375 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!usr/bin/env python3
#author- josh brown
# Collect the user's IPv4 address information and display it back to them.

# prompt for the user's IP address (no validation is performed here)
user_input = input('Please enter an IPv4 IP address:')
# echo the input back so the user can verify it
# (fixed typo in the user-facing output string: "teh" -> "the")
print("You told me the IPv4 address is:" + user_input)
e6a965fe73791f892744afbd4fb1b81e79de9761 | Python | AuroraFeng/Deep-actor-based-reinforcement-learning-for-portfolio-management | /codes/network.py | UTF-8 | 3,130 | 2.8125 | 3 | [] | no_license | ### Aurora
"""
Neural network architecture
Reference: https://github.com/wassname/rl-portfolio-management/blob/master/keras-ddpg.ipynb
"""
# numeric
import numpy as np
from numpy import random
import pandas as pd
import tensorflow
import keras
# reinforcement learning
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from keras.models import Model, Sequential
from keras.layers import Input, InputLayer, Dense, Activation, BatchNormalization, Dropout, regularizers
from keras.layers import Conv1D, Conv2D
from keras.layers import Flatten, Reshape, concatenate, merge
from keras.optimizers import Adam
from keras.activations import relu
from keras.layers.advanced_activations import LeakyReLU
from keras.regularizers import l2, l1_l2
#################### CNN ####################
class Network( object ):
    """Build the CNN actor network architecture for portfolio management."""
    def __init__( self, number_assets, window, number_features = 1 ):
        """Constructor.

        number_assets   -- number of assets (excluding cash)
        number_features -- input channels per asset; 1 = a single price (or
                           return) series; could expand to high/low/close etc.
        """
        self.number_assets = number_assets ### number of assets (excluding cash)
        self.window = window
        self.number_features = number_features
    def actor( self, number_samples = 1, feature_maps = ( 2, 20, 1 ), kernel_size = 3, activation = 'relu', reg = 1e-8 ):
        """CNN actor model.

        Maps (return window, previous action) to a softmax over
        number_assets + 1 portfolio weights (the extra slot is cash).

        reg -- L2 regularization strength.  The original body referenced an
        undefined global `reg` (and a bare `window` instead of self.window);
        both are fixed here, `reg` as a backward-compatible keyword argument.
        """
        ###### 0. input layer ######
        # NOTE(review): the leading number_samples axis looks redundant since
        # keras prepends a batch axis itself; kept for interface compatibility.
        x1 = Input( shape = ( number_samples, self.number_assets, self.window, self.number_features ), name = 'return time series' )
        ### last dimension -- self.number_features -- denotes channel
        x2 = Reshape( ( self.number_assets, self.window, self.number_features ) )( x1 )
        ###### 1. conv2D layer: slide a (1 x kernel_size) window along time ######
        x2 = Conv2D( filters = feature_maps[0], kernel_size = ( 1, kernel_size ), kernel_regularizer = l2(reg), activation = activation )( x2 )
        x2 = BatchNormalization()( x2 )
        ###### 2. conv2D layer: collapse the remaining time axis to length 1 ######
        x2 = Conv2D( filters = feature_maps[1], kernel_size = ( 1, self.window - kernel_size + 1 ), kernel_regularizer = l2(reg), activation = activation )( x2 )
        x2 = BatchNormalization()( x2 )
        ###### now feature_maps[1] maps of shape ( number_assets, 1 ) ######
        ###### previous action: one weight per asset (cash excluded) ######
        z1 = Input( shape = ( self.number_assets, ), name = 'previous action' )
        # reshape the previous action to ( number_assets, 1, 1 ) so it can be
        # stacked as one extra channel next to the conv feature maps; the
        # original called `concatenate()( [ x2, z1 ], axis = 1 )`, which is
        # not a valid keras call and mixes tensors of different rank
        z2 = Reshape( ( self.number_assets, 1, 1 ) )( z1 )
        xx = concatenate( [ x2, z2 ], axis = -1 )
        ###### 3. conv2D layer: 1x1 conv mixing the channels per asset ######
        xx = Conv2D( filters = feature_maps[2], kernel_size = ( 1, 1 ), kernel_regularizer = l2(reg), activation = activation )( xx )
        xx = BatchNormalization()( xx )
        xx = Flatten()( xx )
        ###### add cash bias ######
        ### Dense includes a bias term by default (use_bias=True)
        xx = Dense( units = self.number_assets + 1, kernel_regularizer = l2(reg) )( xx )
        ###### softmax: portfolio weights sum to 1 ######
        y = Activation( 'softmax' )( xx )
        model = Model( inputs = [ x1, z1 ], outputs = y )
        print( 'model summary: \n', model.summary() )
        return model
| true |
92858234834cf6ad2df2fd097425f97a0019d0e0 | Python | aaronabebe/DOPP | /data_extension_edu.py | UTF-8 | 2,259 | 2.921875 | 3 | [] | no_license | import pandas as pd
import streamlit as st
# --- Base data -------------------------------------------------------------
st.markdown("# Extending the base dataset with different data")
st.markdown("## Base Data")
with st.echo():
    # LOAD BASE DATA
    base = pd.read_csv("transformed.csv", index_col="Unnamed: 0")
st.write(base)
st.write(base.shape)
# --- UNESCO education data --------------------------------------------------
st.markdown('## Education Data')
st.markdown('_pick the number of variables to use from the dataset (max 2992)_')
with st.echo():
    nr_vars = 50
st.write('Number of Variables used:',nr_vars)
with st.echo():
    # LOAD EDU DATA
    raw_edu = pd.read_csv("unesco_education_dataset.csv")
    # EDULIT_IND identifies each indicator (long-format table)
    keys = raw_edu.EDULIT_IND.unique()
    # DEFINE BASE CSV
    stem = raw_edu[['LOCATION', 'TIME']]
    # FOR EVERY VAR JOIN ON LOCATION & TIME
    # (pivots the long table into one column per indicator via repeated left joins)
    for i in range(0, nr_vars):
        print(keys[i], i, '/', nr_vars)
        loop = raw_edu.loc[raw_edu.EDULIT_IND == keys[i]]
        stem = pd.merge(stem, loop[['LOCATION', 'TIME', 'Value']], how='left', left_on=['LOCATION','TIME'], right_on = ['LOCATION','TIME'])
    # FIX COLUMNS: rename the generic 'Value' columns to their indicator codes
    stem.columns = ['LOCATION', 'TIME'] + [str(col) for col in keys][:nr_vars]
    # DROP DUPLICATES (defensive; the repeated joins can duplicate key rows)
    edu = stem.drop_duplicates()
st.write(edu)
st.write(edu.shape)
# --- Per-column summary features --------------------------------------------
st.markdown("## Features")
st.markdown("extracted similarly to _data_prep.py_")
# GET DATA PER COLUMN
na_percent = []
na_total = []
minimum = []
maximum = []
for col in edu.columns:
    na_percent.append(round(edu[col].isna().sum() /edu.shape[0] * 100, 2))
    na_total.append(edu[col].isna().sum())
    minimum.append(edu[col].min())
    maximum.append(edu[col].max())
# GET VARIABLE DESCRIPTIONS
# NOTE(review): assumes the 'Indicator' text order matches the `keys` order
# used above -- confirm the alignment before trusting the descriptions.
descriptions = raw_edu['Indicator'].drop_duplicates().tolist()[:nr_vars]
descriptions.insert(0, 'LOCATION')
descriptions.insert(1, 'TIME')
features = pd.DataFrame(
    {'descriptions': descriptions,
     'na_percent': na_percent,
     'na_total': na_total,
     'minimum': minimum,
     'maximum': maximum},
    index=edu.columns)
st.write(features)
st.write(features.shape)
st.markdown('## Additional Information & Experimentation')
with st.echo():
    # Number of distinct countries in LOCATION column
    st.write('number of countries', len(edu['LOCATION'].unique()))
# SAVE
# NOTE(review): this writes `base`, but the filename suggests the merged
# `edu` frame was intended -- confirm before relying on the output file.
base.to_csv('edu_transformed_%d.csv' % (nr_vars), sep=',', na_rep="NA")
st.balloons()
b24827dbf007c96d4d32b6b3d052d9e2d56e424d | Python | BZukerman/StepikRepo | /Python_Programming/Basics and use/Temp_Dict.py | UTF-8 | 991 | 3.21875 | 3 | [] | no_license | variables = {"": []}
# Scratch experiments with dict/list references; expected output is recorded
# in the trailing comments.
# NOTE(review): `set` below shadows the builtin set type -- rename if reused.
print("1", variables) # 1 {'': []}
set = ["a"]
key = "global"
pair = {key: set}
variables = pair
print("2", variables) # 2 {'global': ['a']}
set1 = ["b"]
pair = {key: set1}
# update() replaces the value object stored under 'global'
variables.update(pair)
print("3", variables) # 3 {'global': ['b']}
data = variables.items()
print("4", data) # 4 dict_items([('global', ['b'])])
add = ["c"]
# data.extend(add) # AttributeError: 'dict_items' object has no attribute 'extend'
# print("5", data)
# data.values(add)
# print("6", data) # AttributeError: 'dict_items' object has no attribute 'values'
# get() returns the stored list object itself (not a copy)...
data = variables.get(key)
print("7", data) # 7 ['b']
# ...so mutating it also mutates the value inside the dict
data.extend(add)
print("8", data) # 8 ['b', 'c']
add = "d"
data = variables.get(key)
data.append(add)
print("9", data) # 9 ['b', 'c', 'd']
pair = {key: data}
print("10", pair) # 10 {'global': ['b', 'c', 'd']}
# re-inserting the same list object leaves the dict content unchanged
variables.update(pair)
print(variables) # {'global': ['b', 'c', 'd']}
6607cd9cf5f5be5b6a993dbe6a64df652ca8dd10 | Python | CaioFRodrigues/Formais | /lib/libGrammarReader.py | UTF-8 | 5,514 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env python3
import re
from lib.libExcept import *
"""
Grammar type specifications:
grammar is a dictionary X => Y where
X is 'terms', 'rnames', 'start' or 'rules'
Y depends on the value of X:
'terms' => Y is a set of strings representing terminal symbols
'rnames' => Y is a set of strings representing variables
'start' => Y is a string representing the starting variable
'rules' => Y is a dictionary M => N where
M is a string representing a variable name
N is a list of tuples of strings representing productions for that variable
"""
def parseGrammarFile(fname):
    """
    Open and parse a text file into a grammar type

    Keyword arguments:
    fname = string describing the name of the grammar file

    Return value:
    g = the interpreted grammar in grammar format
    """
    # `with` closes the file even when a parse error propagates
    # (the original only closed the handle on the success path)
    with open(fname, 'r') as fp:
        g = {
            'terms': parseTerms(fp),    # set of terminal symbols
            'rnames': parseVars(fp),    # set of variable names
            'start': parseStart(fp),    # starting symbol
            'rules': parseRules(fp),    # variable -> list of production tuples
        }
    return g
def parseTerms(fp):
    """
    Parse lines from a text file into the set of grammar terminals

    Keyword arguments:
    fp = handle to the open grammar file

    Return value:
    set containing the list of terminal strings
    """
    # header line: "Terminais", optionally followed by a '#' comment
    if re.match(r'^Terminais\s*(?:#.*)?$', fp.readline()) is None:
        raise ParseError(ParseError.termsMsg)
    # body line: match and capture the contents of "{ a, b }" etc.
    m = re.match(r'^\{\s*(.*)\s*\}\s*(?:#.*)?$', fp.readline())
    if m is None:
        raise ParseError(ParseError.termsMsg)
    # split on ',', trim whitespace, and dedupe via a set
    return {sym.strip() for sym in m.group(1).split(',')}
def parseVars(fp):
    """
    Parse lines from a text file into the set of grammar variables

    Keyword arguments:
    fp = handle to the open grammar file

    Return value:
    set containing the list of variable names
    """
    # header line: "Variaveis", optionally followed by a '#' comment
    if re.match(r'^Variaveis\s*(?:#.*)?$', fp.readline()) is None:
        raise ParseError(ParseError.rnamesMsg)
    # body line: match and capture the contents of "{ S, A }" etc.
    m = re.match(r'^\{\s*(.*)\s*\}\s*(?:#.*)?$', fp.readline())
    if m is None:
        raise ParseError(ParseError.rnamesMsg)
    # split on ',', trim whitespace, and dedupe via a set
    return {name.strip() for name in m.group(1).split(',')}
def parseStart(fp):
    """
    Parse lines from a text file into the starting variable

    Keyword arguments:
    fp = handle to the open grammar file

    Return value:
    string representing the starting variable name
    """
    # header line: "Inicial", optionally followed by a '#' comment
    if re.match(r'^Inicial\s*(?:#.*)?$', fp.readline()) is None:
        raise ParseError(ParseError.startMsg)
    # body line: match and capture "{ S }" etc.
    m = re.match(r'^\{\s*(.*)\s*\}\s*(?:#.*)?$', fp.readline())
    if m is None:
        raise ParseError(ParseError.startMsg)
    return m.group(1).strip()
def parseRules(fp):
    """
    Parse lines from a text file into the grammar rules

    Keyword arguments:
    fp = handle to the open grammar file

    Return value:
    rules = dictionary with variable names and their productions
    """
    # header line: "Regras", optionally followed by a '#' comment
    if re.match(r'^Regras\s*(?:#.*)?$', fp.readline()) is None:
        raise ParseError(ParseError.rulesMsg)
    # compiled once: matches "{ a > b, c }" with optional ';' and comment
    rule_pattern = re.compile(r'^\{\s*(.*)\s*>\s*(.*)\s*\}\s*;?\s*(?:#.*)?$')
    rules = {}
    # process each remaining line until the end of the file
    for line in fp:
        # skip (near-)empty lines; a rule is at least "{>}"
        if len(line) < 3:
            continue
        m = rule_pattern.match(line)
        if m is None:
            raise ParseError(ParseError.rulesMsg)
        rname = m.group(1).strip()                             # variable name
        prods = tuple(p.strip() for p in m.group(2).split(','))  # production symbols
        rules.setdefault(rname, []).append(prods)
    # remove duplicate productions per variable
    for rname in rules:
        rules[rname] = list(set(rules[rname]))
    return rules
raise
| true |
9aec94d069a1b7464416807f2e5ea78fa0032b6e | Python | jlh040/Cook-It-Up-Capstone-1 | /models.py | UTF-8 | 6,218 | 2.84375 | 3 | [] | no_license | from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from secret_keys import API_KEY
from helper_funcs import make_additional_calls, get_ingredients_from_recipe
import requests
import json
db = SQLAlchemy()
bcrypt = Bcrypt()
def connect_db(app):
    """Bind the module-level SQLAlchemy instance to the given Flask app."""
    # keeping a reference on db.app allows using db outside an app context
    db.app = app
    db.init_app(app)
class User(db.Model):
    """Represents a user in the database.

    Table: `users`.  Passwords are stored only as bcrypt hashes
    (see signup/login below).
    """
    __tablename__ = 'users'
    # surrogate primary key
    id = db.Column(
        db.Integer,
        autoincrement= True,
        primary_key = True
    )
    # login handle; uniqueness enforced at the database level
    username = db.Column(
        db.String(25),
        unique = True,
        nullable = False
    )
    # bcrypt hash, never the plaintext password
    password = db.Column(
        db.Text,
        nullable = False
    )
    first_name = db.Column(
        db.String(20),
        nullable = False
    )
    # optional
    last_name = db.Column(
        db.String(20)
    )
    # profile picture; falls back to a generic avatar URL
    image_url = db.Column(
        db.Text,
        default = 'https://tinyurl.com/profile-default-image'
    )
    email = db.Column(
        db.Text,
        unique = True,
        nullable = False
    )
    # many-to-many with Recipe through the users_recipes join table
    favorite_recipes = db.relationship( # Changed to 'cooked-recipes' ?
        'Recipe',
        secondary = 'users_recipes'
    )
    def __repr__(self):
        """Create a representation of the user."""
        return f'<User: {self.username}, id: {self.id}>'
    @classmethod
    def signup(cls, username, password, first_name, email, last_name=None, image_url=None):
        """Register user w/ hashed password and return the user.

        Note: the instance is returned unsaved -- the caller is responsible
        for adding/committing it to the session.
        """
        hashed = bcrypt.generate_password_hash(password)
        # bcrypt returns bytes; decode so the Text column stores a string
        hashed_utf8 = hashed.decode('utf8')
        return cls(
            username=username,
            password=hashed_utf8,
            first_name=first_name,
            last_name=last_name,
            image_url=image_url,
            email=email
        )
    @classmethod
    def login(cls, username, password):
        """Return the user if they can be authenticated,
        otherwise return False.
        """
        user = User.query.filter(User.username == username).first()
        # compare the stored bcrypt hash against the candidate password
        if user and bcrypt.check_password_hash(user.password, password):
            return user
        else:
            return False
class UserRecipe(db.Model):
    """Associates users and their favorite recipes.

    Join table backing the many-to-many User.favorite_recipes relationship.
    """
    __tablename__ = 'users_recipes'
    # surrogate primary key for the association row
    id = db.Column(
        db.Integer,
        autoincrement = True,
        primary_key = True
    )
    # deleting a user also removes their association rows (cascade)
    user_id = db.Column(
        db.Integer,
        db.ForeignKey('users.id', ondelete='cascade')
    )
    recipe_id = db.Column(
        db.Integer,
        db.ForeignKey('recipes.api_id')
    )
class Recipe(db.Model):
    """Represents a recipe in the database.

    Only the Spoonacular API id is stored locally; all recipe details are
    fetched on demand through the classmethods below.
    """
    __tablename__ = 'recipes'
    api_id = db.Column(
        db.Integer,
        autoincrement = True,
        primary_key = True,
        index = True
    )
    def __repr__(self):
        """Create a representation of a recipe."""
        return f'<api_id: {self.api_id}>'
    @classmethod
    def get_recipes_by_cuisine(cls, cuisine_name):
        """Search for a list of recipes by cuisine name.

        Returns a list of (id, title) tuples after passing the first page
        through make_additional_calls (presumably fetches further result
        pages -- see helper_funcs; confirm).
        """
        api_endpoint = 'https://api.spoonacular.com/recipes/complexSearch'
        resp = requests.get(api_endpoint, params = {
            'cuisine': cuisine_name,
            'apiKey': API_KEY,
            'number': 100,
            'instructionsRequired': True
        })
        list_of_recipe_titles = [(dictt['id'], dictt['title']) for dictt in resp.json()['results']]
        list_of_recipe_titles = make_additional_calls(resp, list_of_recipe_titles, cuisine_name=cuisine_name)
        return list_of_recipe_titles
    @classmethod
    def get_recipes_by_query_and_cals(cls, query, cals):
        """Search for a list of recipes by a query term, capped at `cals` calories."""
        api_endpoint = 'https://api.spoonacular.com/recipes/complexSearch'
        resp = requests.get(api_endpoint, params = {
            'query': query,
            'apiKey': API_KEY,
            'number': 100,
            'maxCalories': cals,
            'instructionsRequired': True
        })
        # NOTE(review): nutrients[0] is assumed to be the calorie entry --
        # confirm the API guarantees that ordering
        list_of_recipe_titles = [(dictt['id'], dictt['title'], dictt['nutrition']['nutrients'][0]['amount']) for dictt in resp.json()['results']]
        list_of_recipe_titles = make_additional_calls(resp, list_of_recipe_titles, query=query, cals=cals)
        return list_of_recipe_titles
    @classmethod
    def get_recipe_info(cls, id):
        """Return a recipe's meta-info by id as (title, image_url, ingredients)."""
        api_endpoint = f'https://api.spoonacular.com/recipes/{id}/information'
        resp = requests.get(api_endpoint, params = {'apiKey': API_KEY})
        title = resp.json()['title']
        # fall back to a placeholder image when the API supplies none
        image_url = resp.json().get('image', 'https://tinyurl.com/ymxdeb5y')
        ingredients = get_ingredients_from_recipe(resp)
        return (title, image_url, ingredients)
    @classmethod
    def get_equipment_for_recipe(cls, id):
        """Get a recipe's equipment (raw JSON payload from the API)."""
        api_endpoint = f'https://api.spoonacular.com/recipes/{id}/equipmentWidget.json'
        resp = requests.get(api_endpoint, params={'apiKey': API_KEY})
        return resp.json()
    @classmethod
    def get_instructions_for_recipe(cls, id):
        """Get a recipe's instructions as a list of (step_number, step_text)."""
        api_endpoint = f'https://api.spoonacular.com/recipes/{id}/analyzedInstructions'
        resp = requests.get(api_endpoint, params = {
            'apiKey': API_KEY,
        })
        # NOTE(review): resp.json()[0] raises IndexError when the recipe has
        # no instruction blocks -- confirm callers only pass recipes fetched
        # with instructionsRequired=True
        list_of_instructions = [
            (obj['number'], obj['step']) for obj in resp.json()[0]['steps']
        ]
        return list_of_instructions
    @classmethod
    def get_multiple_recipes(cls, list_of_recipe_objs):
        """Get multiple recipes at once as (id, title, image) tuples.

        An empty/falsy input is returned unchanged without hitting the API.
        """
        if list_of_recipe_objs:
            api_endpoint = 'https://api.spoonacular.com/recipes/informationBulk'
            api_ids = [str(obj.api_id) for obj in list_of_recipe_objs]
            resp = requests.get(api_endpoint, params = {
                'apiKey': API_KEY,
                'ids': ','.join(api_ids)
            })
            recipe_list = [(obj['id'], obj['title'], obj['image']) for obj in resp.json()]
            return recipe_list
        else:
            return list_of_recipe_objs
ad1a5b440df8385b742d4c2042584449375e144f | Python | VinayakBagaria/Personal-Blogging | /src/models/post.py | UTF-8 | 1,554 | 2.890625 | 3 | [] | no_license | import datetime
import uuid
from API.src.common.database import Database
class Post(object):
    """A blog post persisted to the 'posts' Mongo collection."""

    def __init__(self, blog_id, title, content, author, created_date=None, _id=None):
        """Create a post.

        created_date -- defaults to the current UTC time.  The original used
        `created_date=datetime.datetime.utcnow()` as the default, which is
        evaluated once at import time, so every post created without an
        explicit date silently shared the module-load timestamp.  Passing an
        explicit datetime still behaves exactly as before.
        """
        self.blog_id = blog_id
        self.title = title
        self.content = content
        self.author = author
        # evaluate "now" per call, not once at class-definition time
        self.created_date = created_date if created_date is not None else datetime.datetime.utcnow()
        # generate a unique id in hex format (32 chars) unless one was given
        self._id = uuid.uuid4().hex if _id is None else _id

    def save_to_mongo(self):
        """Insert this post's JSON representation into the 'posts' collection."""
        Database.insert(collection='posts', data=self.json())

    # Create a json representation of the post itself to store in mongo db
    def json(self):
        """Return a dict representation suitable for Mongo storage."""
        return {
            '_id': self._id,
            'blog_id': self.blog_id,
            'author': self.author,
            'content': self.content,
            'title': self.title,
            'created_date': self.created_date
        }

    @classmethod
    def from_mongo(cls, _id):
        """Load a single post by its _id.

        `cls(**post_data)` expands the stored document straight into the
        constructor keyword arguments (blog_id, title, content, author,
        created_date, _id).
        """
        post_data = Database.find_one(collection='posts', query={'_id': _id})
        return cls(**post_data)

    @staticmethod
    def from_blog(id):
        """Return all posts belonging to blog `id`.

        Note: the parameter name `id` shadows the builtin; kept unchanged
        for keyword-argument compatibility with existing callers.
        """
        return [post for post in Database.find(collection='posts', query={'blog_id': id})]