blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32efff5a989beba8a65027873b832074988be3d1 | 5125880d0b7af5cc86ab65f839f5db0d5cff9640 | /python/dex/__init__.py | 6850c436d32d6d30002244b9e4128e1f497b1430 | [
"BSD-3-Clause"
] | permissive | stjordanis/dex-lang | 8d9582f96cc02bf631cf325f3dde9a729ed9941e | d8540257c8e00c9d9e86f4b53190052b5a145b68 | refs/heads/main | 2023-02-02T12:12:42.068628 | 2020-12-19T21:57:04 | 2020-12-19T21:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,032 | py | # Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import itertools as it
import ctypes
import pathlib
import atexit
from enum import Enum
from typing import List
__all__ = ['execute']
here = pathlib.Path(__file__).parent.absolute()
lib = ctypes.cdll.LoadLibrary(here / 'libDex.so')
def tagged_union(name: str, members: List[type]):
    """Build a ctypes Structure that emulates a tagged union.

    The generated type carries a 64-bit ``tag`` field selecting which of
    *members* is active and a ``payload`` union holding the alternatives
    (named ``t0`` .. ``tN`` in order).  A read-only ``value`` property
    returns the payload member selected by the current tag.
    """
    fields = []
    for index, member in enumerate(members):
        fields.append(("t%d" % index, member))
    payload_cls = type(name + "Payload", (ctypes.Union,), {"_fields_": fields})

    def _active_value(self):
        # Dereference the payload slot named by the current tag.
        return getattr(self.payload, "t%d" % self.tag)

    attrs = {
        "_fields_": [("tag", ctypes.c_uint64), ("payload", payload_cls)],
        "value": property(_active_value),
    }
    return type(name, (ctypes.Structure,), attrs)
CLit = tagged_union("Lit", [ctypes.c_int64, ctypes.c_int32, ctypes.c_int8, ctypes.c_double, ctypes.c_float])
class CRectArray(ctypes.Structure):
_fields_ = [("data", ctypes.c_void_p),
("shape_ptr", ctypes.POINTER(ctypes.c_int64)),
("strides_ptr", ctypes.POINTER(ctypes.c_int64))]
CAtom = tagged_union("CAtom", [CLit, CRectArray])
assert ctypes.sizeof(CAtom) == 4 * 8
class HsAtom(ctypes.Structure): pass
class HsContext(ctypes.Structure): pass
_init = lib.dexInit
_init.restype = None
_init.argtypes = []
_fini = lib.dexFini
_fini.restype = None
_fini.argtypes = []
_create_context = lib.dexCreateContext
_create_context.restype = ctypes.POINTER(HsContext)
_create_context.argtypes = []
_destroy_context = lib.dexDestroyContext
_destroy_context.restype = None
_destroy_context.argtypes = [ctypes.POINTER(HsContext)]
_print = lib.dexPrint
_print.restype = ctypes.c_char_p
_print.argtypes = [ctypes.POINTER(HsAtom)]
_insert = lib.dexInsert
_insert.restype = ctypes.POINTER(HsContext)
_insert.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p, ctypes.POINTER(HsAtom)]
_eval = lib.dexEval
_eval.restype = ctypes.POINTER(HsContext)
_eval.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p]
_evalExpr = lib.dexEvalExpr
_evalExpr.restype = ctypes.POINTER(HsAtom)
_evalExpr.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p]
_lookup = lib.dexLookup
_lookup.restype = ctypes.POINTER(HsAtom)
_lookup.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p]
_toCAtom = lib.dexToCAtom
_toCAtom.restype = ctypes.c_int
_toCAtom.argtypes = [ctypes.POINTER(HsAtom), ctypes.POINTER(CAtom)]
_getError = lib.dexGetError
_getError.restype = ctypes.c_char_p
_getError.argtypes = []
_init()
_nofree = False
@atexit.register
def _teardown():
    """Shut down the Haskell RTS when the interpreter exits.

    After dexFini has run, destroying any remaining Haskell objects would
    crash, so _nofree flags the Module/Atom destructors to become no-ops.
    """
    global _nofree
    _fini()
    _nofree = True # Don't destruct any Haskell objects after the RTS has been shutdown
def _as_cstr(x: str):
return ctypes.c_char_p(x.encode('ascii'))
def _from_cstr(cx):
return cx.value.decode('ascii')
class Module:
    """A Dex module: wraps the HsContext produced by evaluating *source* on top of the prelude."""
    # ctypes reads _as_parameter_ when the object itself is passed to an FFI call.
    __slots__ = ('_as_parameter_',)
    def __init__(self, source):
        # Evaluate the module source inside the prelude's context; the C side
        # returns a fresh context (NULL on failure).
        self._as_parameter_ = _eval(prelude, _as_cstr(source))
        if not self._as_parameter_:
            raise RuntimeError(_from_cstr(_getError()))
    def __del__(self):
        if _nofree:
            return  # Haskell RTS already shut down; freeing now would crash.
        _destroy_context(self)
    def __getattr__(self, name):
        # Resolve a top-level Dex binding by name; only invoked for attributes
        # not found normally (i.e. anything other than the slots above).
        result = _lookup(self, _as_cstr(name))
        if not result:
            raise RuntimeError(_from_cstr(_getError()))
        return Atom(result, self)
class Prelude(Module):
    """The base Dex context holding the prelude; parent context of every user Module."""
    __slots__ = ()
    def __init__(self):
        # Deliberately does not call Module.__init__: the prelude context is
        # created directly instead of by evaluating source on another context.
        self._as_parameter_ = _create_context()
        if not self._as_parameter_:
            raise RuntimeError("Failed to initialize prelude!")
prelude = Prelude()
def eval(expr: str, module=prelude, _env=None):
    """Evaluate a Dex expression and return the resulting Atom.

    *module* becomes the owner of the returned Atom.  *_env* is internal:
    when given, it overrides the context the expression is evaluated in
    (used by Atom.__call__ after binding temporary arguments).
    Raises RuntimeError with the Dex error message on failure.
    """
    if _env is None:
        _env = module
    result = _evalExpr(_env, _as_cstr(expr))
    if not result:
        raise RuntimeError(_from_cstr(_getError()))
    return Atom(result, module)
class Atom:
    """A Dex value (HsAtom*) together with the Module that owns it."""
    __slots__ = ('_as_parameter_', 'module')
    def __init__(self, ptr, module):
        self._as_parameter_ = ptr  # raw HsAtom* handed straight to the C API
        self.module = module
    def __del__(self):
        # TODO: Free
        pass
    def __repr__(self):
        return _print(self).decode('ascii')
    def __int__(self):
        return int(self._as_scalar())
    def __float__(self):
        return float(self._as_scalar())
    def _as_scalar(self):
        """Marshal the atom into a CAtom and return its literal payload.

        Raises TypeError when the atom is not a scalar (e.g. an array).
        """
        result = CAtom()
        success = _toCAtom(self, ctypes.pointer(result))
        if not success:
            raise RuntimeError(_from_cstr(_getError()))
        value = result.value
        if not isinstance(value, CLit):
            raise TypeError("Atom is not a scalar value")
        return value.value
    def __call__(self, *args):
        # TODO: Make those calls more hygenic
        # Binds self and every argument as python_arg<i> in a chain of fresh
        # contexts, then evaluates "python_arg0 python_arg1 ..." which is Dex
        # function application.
        env = self.module
        for i, atom in enumerate(it.chain((self,), args)):
            # NB: Atoms can contain arbitrary references
            if atom.module is not prelude and atom.module is not self.module:
                raise RuntimeError("Mixing atoms coming from different Dex modules is not supported yet!")
            old_env, env = env, _insert(env, _as_cstr(f"python_arg{i}"), atom)
            # NOTE(review): on the first iteration old_env is the module's own
            # context -- presumably dexInsert copies/refcounts it so destroying
            # it here is safe; confirm against the C API before changing this.
            _destroy_context(old_env)
        return eval(" ".join(f"python_arg{i}" for i in range(len(args) + 1)), module=self.module, _env=env)
| [
"adam.paszke@gmail.com"
] | adam.paszke@gmail.com |
0a405906f42b2f5c2a7f3d2f861450fc9416b069 | b04183027d7fe10d034092fc5de349dbca4a2851 | /KuegeliFarbe/testServoWithGetCh.py | eb2ef4338e52d994d2b0b9fc766729385b8bc4f8 | [] | no_license | magictimelapse/RaspberryJamSchweiz | b96afb123c9b38de8f8eff8c42e0f28204958762 | 8ad4b30e04dfc081b6af60e8049cb73624e6073d | refs/heads/master | 2021-01-20T21:07:09.302708 | 2018-07-03T17:04:43 | 2018-07-03T17:04:43 | 64,651,932 | 1 | 3 | null | 2017-11-23T20:53:14 | 2016-08-01T08:58:18 | Python | UTF-8 | Python | false | false | 687 | py | from servoControl import auf,zu
from gibPfeilTaste import gibPfeilTaste
import time
servos = {
"rechts": {"number": 0, "state":False},
"links": {"number": 1, "state":False},
"unten": {"number": 2, "state":False},
"oben": {"number": 3, "state":False}
}
def setServos(servos):
    """Drive every servo to match its desired state.

    For each entry: open (auf) when "state" is True, close (zu) when it is
    False, pausing briefly after each move so the servo can settle.
    """
    for name, config in servos.items():
        action = auf if config["state"] else zu
        action(config["number"])
        time.sleep(0.25)
setServos(servos)
while True:
pfeil = gibPfeilTaste()
print (pfeil)
servos[pfeil]["state"] = not servos[pfeil]["state"]
#states[number] = not states[number]
setServos(servos)
| [
"michael.rissi@gmail.com"
] | michael.rissi@gmail.com |
a6bc37dc9ad5bc6951bca4dc1f04117dbd420531 | 7ab35999f8a9fcdbc153176d50f38857eaf169b3 | /exercises/generators.py | 8a6ede0dbac143539a45d075fd1057a2472367b4 | [] | no_license | teknik-eksjo/iterators | 5a5e54f581f5d2e19fc93d67b975d533899bdc41 | b16ef34343ce28e38db1b8291fdce2e52bb8bcd8 | refs/heads/master | 2021-01-10T02:57:58.891394 | 2017-10-27T07:04:02 | 2017-10-27T07:04:02 | 48,051,745 | 1 | 20 | null | 2017-11-27T12:44:52 | 2015-12-15T15:31:18 | Python | UTF-8 | Python | false | false | 1,785 | py | """Övningar på generators."""
def cubes():
    """Generate the cubes (i ** 3) of the positive integers.

    Based on the series 1, 2, 3, 4, 5, 6, ...; the generated series thus
    starts 1, 8, 27, 64, 125, 216, ... and never ends.
    """
    n = 1
    while True:
        yield n ** 3
        n += 1
def primes():
    """Generate the prime numbers without end.

    The expected series starts: 2, 3, 5, 7, 11, 13, 17, 19, 23, ...
    """
    known = []  # primes found so far, used for trial division
    candidate = 2
    while True:
        if all(candidate % p != 0 for p in known):
            known.append(candidate)
            yield candidate
        candidate += 1
def fibonacci():
    """Generate the famous Fibonacci numbers.

    The series starts with 0 and 1; every following number is the sum of
    the two preceding ones: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, ...
    """
    a, b = 0, 1
    while True:
        yield a
        a, b = b, a + b
def alphabet():
    """Generate the names of the letters of the Hebrew alphabet.

    The names are yielded in alphabetical order:
    Alef, Bet, Gimel, Dalet, He, Vav, Zayin, Het, Tet, Yod, Kaf, Lamed,
    Mem, Nun, Samekh, Ayin, Pe, Tsadi, Qof, Resh, Shin, Tav
    """
    names = (
        "Alef", "Bet", "Gimel", "Dalet", "He", "Vav", "Zayin", "Het", "Tet",
        "Yod", "Kaf", "Lamed", "Mem", "Nun", "Samekh", "Ayin", "Pe", "Tsadi",
        "Qof", "Resh", "Shin", "Tav",
    )
    yield from names
def permutations(s):
    """Generate every permutation of the input string *s*.

    For the input 'abc' the six permutations 'abc', 'acb', 'bac', 'bca',
    'cab', 'cba' are produced, each exactly once (assuming distinct
    characters in *s*).
    """
    if len(s) <= 1:
        yield s
        return
    # Fix each character in turn and recurse on the remainder.
    for i, ch in enumerate(s):
        for tail in permutations(s[:i] + s[i + 1:]):
            yield ch + tail
def look_and_say():
    """Generate the look-and-say sequence as strings.

    Each term is obtained by reading the previous one aloud, counting
    runs of equal digits:
    '1' reads "one 1" -> '11'
    '11' reads "two 1s" -> '21'
    '21' reads "one 2, one 1" -> '1211'
    '1211' reads "one 1, one 2, two 1s" -> '111221'
    '111221' reads "three 1s, two 2s, one 1" -> '312211'
    """
    term = "1"
    while True:
        yield term
        # Run-length encode the current term to form the next one.
        pieces = []
        i = 0
        while i < len(term):
            j = i
            while j < len(term) and term[j] == term[i]:
                j += 1
            pieces.append(str(j - i))
            pieces.append(term[i])
            i = j
        term = "".join(pieces)
| [
"linus@etnolit.se"
] | linus@etnolit.se |
5e0e8fb64e239e682842f4872ffa5875bf6193ed | d85f3bfcc7efb3313bd77ba43abbde8527c731d9 | /ch03/ex3-4.py | f0c21790e3b61443d823170f444377a3dd28e0b7 | [] | no_license | freebz/Introducing-Python | 8c62767e88b89eb614abd3ea4cf19aae946f5379 | ecf2082946eac83072328a80ed1e06b416ef5170 | refs/heads/master | 2020-04-08T21:14:42.398462 | 2018-11-29T17:03:11 | 2018-11-29T17:03:11 | 159,736,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | # 3.5 셋
# Interactive-style demo of Python sets ("Introducing Python", section 3.5).
# Comment lines immediately below an expression show its interpreter output.

# 3.5.1 Creating a set: set()
empty_set = set()
empty_set
# set()
even_numbers = {0, 2, 4, 6, 8}
even_numbers
# {0, 2, 4, 6, 8}
odd_numbers = {1, 3, 5, 7, 9}
odd_numbers
# {1, 3, 5, 7, 9}

# 3.5.2 Converting other data types: set()
set( 'letters' )
# {'t', 'l', 's', 'e', 'r'}
set( ['Dasher', 'Dancer', 'Prancer', 'Mason-Dixon'] )
# {'Dancer', 'Dasher', 'Prancer', 'Mason-Dixon'}
set( ('Ummagumma', 'Echoes', 'Atom Heart Mother') )
# {'Atom Heart Mother', 'Ummagumma', 'Echoes'}
# NOTE(review): 'cheery' below is presumably a typo for 'cherry'; kept as-is
# because it matches the book's listing.
set( {'apple': 'red', 'orange': 'orange', 'cheery': 'red'} )
# {'cheery', 'apple', 'orange'}

# 3.5.3 Testing membership of a value with in
drinks = {
    'martini': {'vodka', 'vermouth'},
    'black russian': {'vodka', 'kahlua'},
    'white russian': {'cream', 'kahlua', 'vodka'},
    'manhattan': {'rye', 'vermouth', 'bitters'},
    'screwdriver': {'orange juice', 'vodka'}
}
for name, contents in drinks.items():
    if 'vodka' in contents:
        print(name)
# martini
# black russian
# white russian
# screwdriver
for name, contents in drinks.items():
    if 'vodka' in contents and not ('vermouth' in contents or
                                    'cream' in contents):
        print(name)
# black russian
# screwdriver

# 3.5.4 Combinations and set operators
for name, contents in drinks.items():
    if contents & {'vermouth', 'orange juice'}:
        print(name)
# martini
# manhattan
# screwdriver
for name, contents in drinks.items():
    if 'vodka' in contents and not contents & {'vermouth', 'cream'}:
        print(name)
# black russian
# screwdriver
bruss = drinks['black russian']
wruss = drinks['white russian']
a = {1, 2}
b = {2, 3}
# Intersection: & / intersection()
a & b
# {2}
a.intersection(b)
# {2}
bruss & wruss
# {'kahlua', 'vodka'}
# Union: | / union()
a | b
# {1, 2, 3}
a.union(b)
# {1, 2, 3}
bruss | wruss
# {'vodka', 'cream', 'kahlua'}
# Difference: - / difference()
a - b
# {1}
a.difference(b)
# {1}
bruss - wruss
# set()
wruss - bruss
# {'cream'}
# Symmetric difference: ^ / symmetric_difference()
a ^ b
# {1, 3}
a.symmetric_difference(b)
# {1, 3}
bruss ^ wruss
# {'cream'}
# Subset: <= / issubset()
a <= b
# False
a.issubset(b)
# False
bruss <= wruss
# True
a <= a
# True
a.issubset(a)
# True
# Proper subset: <
a < b
# False
a < a
# False
bruss < wruss
# True
# Superset: >= / issuperset()
a >= b
# False
a.issuperset(b)
# False
wruss >= bruss
# True
a >= a
# True
a.issuperset(a)
# True
# Proper superset: >
a > b
# False
wruss > bruss
# True
| [
"freebz@hananet.net"
] | freebz@hananet.net |
b3bc967860631430270fe4a366e8ce79aa492caf | 531caac957596fc623e534bce734ef6b45be0b07 | /tests/operators/dynamic_shape/test_cast.py | 1ebaaf932f5e4f2f4265fd02ac81982bbe3f88da | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | wxyhv/akg | 02e64d81bbb84472e0bf1c57a691b688ea743d6e | fc9b6f5b6fa024da89bf90466a815359ca54015d | refs/heads/master | 2023-03-11T02:59:18.472826 | 2021-02-23T07:44:16 | 2021-02-23T07:44:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,995 | py | import boot
import pytest
def test_cast():
    """Run the dynamic-shape cast_run cases for the ResNet-50 workload.

    Each boot.run invocation exercises one cast configuration:
    (test name, kernel runner, (input shape, source dtype, target dtype),
    shape mode).  The commented-out cases are kept verbatim: they appear to
    be deliberately disabled (e.g. known-slow or already-covered configs) —
    confirm before re-enabling or deleting them.
    """
    #boot.run("test_resnet50_cast_000", "cast_run", ((64, 128, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_001", "cast_run", ((32, 64, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_002", "cast_run", ((16, 32, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_003", "cast_run", ((4, 16, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_004", "cast_run", ((49, 4, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_005", "cast_run", ((32, 4, 112, 112, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_006", "cast_run", ((32, 4, 56, 56, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_007", "cast_run", ((32, 16, 56, 56, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_008", "cast_run", ((36, 4, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_009", "cast_run", ((4, 4, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_010", "cast_run", ((32, 4, 56, 56, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_011", "cast_run", ((16, 4, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_012", "cast_run", ((32, 16, 56, 56, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_013", "cast_run", ((32, 32, 28, 28, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_014", "cast_run", ((8, 32, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_015", "cast_run", ((72, 8, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_016", "cast_run", ((16, 8, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_017", "cast_run", ((32, 8, 56, 56, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_018", "cast_run", ((32, 8, 56, 56, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_019", "cast_run", ((32, 8, 28, 28, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_020", "cast_run", ((32, 8, 28, 28, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_021", "cast_run", ((32, 8, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_022", "cast_run", ((32, 32, 28, 28, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_023", "cast_run", ((32, 64, 14, 14, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_024", "cast_run", ((16, 64, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_025", "cast_run", ((144, 16, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_026", "cast_run", ((32, 16, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_027", "cast_run", ((32, 16, 28, 28, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_028", "cast_run", ((32, 16, 28, 28, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_029", "cast_run", ((32, 16, 14, 14, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_030", "cast_run", ((32, 16, 14, 14, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_031", "cast_run", ((64, 16, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_032", "cast_run", ((32, 64, 14, 14, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_033", "cast_run", ((32, 128, 7, 7, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_034", "cast_run", ((32, 128, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_035", "cast_run", ((288, 32, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_036", "cast_run", ((64, 32, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_037", "cast_run", ((32, 32, 14, 14, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_038", "cast_run", ((32, 32, 14, 14, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_039", "cast_run", ((32, 32, 7, 7, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_040", "cast_run", ((32, 32, 7, 7, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_041", "cast_run", ((128, 32, 16, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_042", "cast_run", ((32, 128, 7, 7, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_043", "cast_run", ((32, 4, 112, 112, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_044", "cast_run", ((32, 128, 1, 1, 16), "float32", "float16"), "dynamic")
    #boot.run("test_resnet50_cast_045", "cast_run", ((32, 2048, 1, 1), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_048", "cast_run", ((64, 128, 16, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_049", "cast_run", ((32, 64, 16, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_050", "cast_run", ((16, 32, 16, 16), "float16", "float32"), "dynamic")
    #boot.run("test_resnet50_cast_051", "cast_run", ((4, 16, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_052", "cast_run", ((49, 4, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_053", "cast_run", ((36, 4, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_054", "cast_run", ((4, 4, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_055", "cast_run", ((16, 4, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_056", "cast_run", ((8, 32, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_057", "cast_run", ((72, 8, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_058", "cast_run", ((16, 8, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_059", "cast_run", ((32, 8, 56, 56, 16), "float32", "float16"), "dynamic")
    boot.run("test_resnet50_cast_060", "cast_run", ((32, 8, 56, 56, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_061", "cast_run", ((32, 8, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_062", "cast_run", ((16, 64, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_063", "cast_run", ((144, 16, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_064", "cast_run", ((32, 16, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_065", "cast_run", ((32, 16, 28, 28, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_066", "cast_run", ((32, 16, 28, 28, 16), "float32", "float16"), "dynamic")
    boot.run("test_resnet50_cast_067", "cast_run", ((64, 16, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_068", "cast_run", ((32, 128, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_069", "cast_run", ((288, 32, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_070", "cast_run", ((64, 32, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_071", "cast_run", ((32, 32, 14, 14, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_072", "cast_run", ((32, 32, 14, 14, 16), "float32", "float16"), "dynamic")
    boot.run("test_resnet50_cast_073", "cast_run", ((128, 32, 16, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_074", "cast_run", ((32, 2048, 1, 1), "float32", "float16"), "dynamic")
    boot.run("test_resnet50_cast_075", "cast_run", ((32, 128, 1, 1, 16), "float16", "float32"), "dynamic")
    boot.run("test_resnet50_cast_080", "cast_run", ((64, 128, 16, 16), "bool", "int32"), "dynamic")
| [
"ckey.chengbin@huawei.com"
] | ckey.chengbin@huawei.com |
e9ebcdf8f07d730698b3da8ca557cdc0dc299750 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /0939_Minimum_Area_Rectangle/try_2.py | 773cc7d337543910410b251e4b4546fdfb4f8b73 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 995 | py | class Solution:
def minAreaRect(self, points: List[List[int]]) -> int:
    """Return the minimum area of an axis-aligned rectangle whose four
    corners are all in *points*, or 0 if no such rectangle exists.

    Strategy: index the y-coordinates by x-coordinate, then treat every
    pair of points as a candidate diagonal and check in O(1) whether the
    two remaining corners exist.
    """
    # Group y-coordinates by x so corner lookups are O(1).
    ys_by_x = {}
    for x, y in points:
        ys_by_x.setdefault(x, set()).add(y)

    best = float("inf")
    # enumerate() replaces the original points.index(e1), which was O(n)
    # per pair and incorrect if the list contained duplicate points.
    for i, (x1, y1) in enumerate(points):
        for x2, y2 in points[i + 1:]:
            if x1 == x2 or y1 == y2:
                continue  # degenerate pair: not a diagonal
            if y2 in ys_by_x[x1] and y1 in ys_by_x[x2]:
                best = min(best, abs(x1 - x2) * abs(y1 - y2))
    return 0 if best == float("inf") else best
| [
"f14051172@gs.ncku.edu.tw"
] | f14051172@gs.ncku.edu.tw |
cfd8ae9767066953e1ea707ce6f3ec6793552d06 | e1e08ca2df1caadc30b5b62263fa1e769d4904d8 | /stream/models/db_wizard_auth.py | 531b7c4e5d9eaad89effd490b6e181cf48a09428 | [
"LicenseRef-scancode-public-domain"
] | permissive | tiench189/ClassbookStore | 509cedad5cc4109b8fb126ad59e25b922dfae6be | 4fff9bc6119d9ec922861cbecf23a3f676551485 | refs/heads/master | 2020-12-02T07:48:26.575023 | 2017-07-10T02:45:09 | 2017-07-10T02:45:09 | 96,728,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,873 | py | # -*- coding: utf-8 -*-
"""
#-----------------------------------------------------------------------------
# Name: db_wizard_auth
#
# Purpose:
#
# Version: 1.1
#
# Author: manhtd
#
# Created: 1/14/14
# Updated: 1/14/14
#
# Copyright: (c) Tinh Vân Books
#
# Todo:
#-----------------------------------------------------------------------------
"""
db.define_table('auth20_function_category',
Field('name', type='string', notnull=True, label=T('Category Name')),
Field('category_order', type='integer', notnull=True, default=0, label=T('Category Order')),
Field('description', type='text', label=T('Function Description')),
auth.signature, format='%(name)s')
db.define_table('auth20_function',
Field('name', type='string', notnull=True, label=T('Function Name')),
Field('category', type='reference auth20_function_category', label=T('Function Category')),
Field('aname', type='string', notnull=True, label=T('Application Name')),
Field('cname', type='string', notnull=True, label=T('Controller Name')),
Field('fname', type='string', notnull=True, default='index', label=T('Function Name')),
Field('args', type='list:string', notnull=True, label=T('Function Agruments')),
Field('vars', type='list:string', notnull=True, label=T('Function Variables')),
Field('description', type='text', label=T('Function Description')),
auth.signature, format='%(name)s')
#########################################
db.define_table('auth20_action',
Field('name', type='string', notnull=True, unique=True, label=T('Action Name')),
Field('description', type='text', label=T('Action Description')),
auth.signature, format='%(name)s')
#########################################
db.define_table('auth20_data',
Field('name', type='string', notnull=True, unique=True, label=T('Data Name')),
Field('table_name', type='string', notnull=True, label=T('Table Data')),
Field('data_condition', type='string', notnull=True, default='id>0', label=T('Data Condition')),
auth.signature, format='%(name)s')
#########################################
db.define_table('auth20_permission',
Field('group_id', type='reference auth_group', label=T('Auth Group')),
Field('actions', type='list:reference auth20_action', label=T('Auth Actions'),
requires=IS_IN_DB(db, 'auth20_action.id', '%(name)s', multiple=(1, 10000), sort=True)),
Field('functions', type='list:reference auth20_function', label=T('Auth Functions'),
requires=IS_IN_DB(db, 'auth20_function.id',
lambda r: '%s > %s > %s' % (r.aname, r.category.name, r.name),
multiple=(1, 10000), sort=True)),
Field('data_id', type='reference auth20_data', label=T('Auth Data'),
requires=IS_EMPTY_OR(IS_IN_DB(db, 'auth20_data.id', '%(id)s'))),
auth.signature)
def __authorize():
    """Decorator factory enforcing function-level permissions via the auth20 tables.

    The wrapped web2py action only runs when the logged-in user belongs to a
    group whose auth20_permission row grants the 'View' action on the current
    application/controller/function.  On success the matching permission
    records (granted actions plus optional data scope) are attached to
    auth.user.permissions for use inside the action; otherwise RESTful
    callers receive HTTP 401 and browser requests are redirected home.
    """
    def decorate(action):
        def f(*a, **b):
            if auth.user:
                permissions = list()
                for group_id in auth.user_groups.keys():
                    # Join permission -> function rows that match the target
                    # of the current request, restricted to the 'View' action.
                    query = db(db.auth20_permission.group_id == group_id)
                    query = query(db.auth20_permission.functions.contains(db.auth20_function.id))
                    query = query(db.auth20_function.aname == request.application)
                    query = query(db.auth20_function.cname == request.controller)
                    query = query(db.auth20_function.fname == request.function)
                    query = query(db.auth20_permission.actions.contains(db.auth20_action.id))
                    roles = query(db.auth20_action.name == 'View').select(db.auth20_permission.actions,
                                                                          db.auth20_permission.data_id)
                    if len(roles) > 0:
                        for role in roles:
                            # Expand the granted action ids into names and
                            # fetch the optional data-scope restriction.
                            actions = db(db.auth20_action.id.belongs(role.actions))
                            actions = actions.select(db.auth20_action.name).as_list()
                            data = db(db.auth20_data.id == role.data_id).select(db.auth20_data.table_name,
                                                                                db.auth20_data.data_condition).as_list()
                            permissions.append(dict(actions=actions, data=data))
                if len(permissions) > 0:
                    auth.user.permissions = permissions
                    return action(*a, **b)
            # Not authorized (or not logged in).
            if request.is_restful:
                raise HTTP(401)
            else:
                session.flash = "You don't have permission to access!"
                redirect(URL(c='default', f='index'))
        f.__doc__ = action.__doc__
        f.__name__ = action.__name__
        f.__dict__.update(action.__dict__)
        return f
    return decorate
auth.requires_authorize = __authorize
def __authorize_token(pos=0, total=1):
    """Decorator factory for token-authenticated RPC-style actions.

    The wrapped action must receive exactly *total* positional arguments
    with an auth token at index *pos*.  On success the token argument is
    replaced by the matching auth_user id and last_login is refreshed.
    On failure a dict(result=False, reason=..., message=...) is returned:
    reason 0 = bad parameters, 1 = unknown token, 2 = expired token,
    -1 = internal error.
    """
    def decorate(action):
        def f(*a, **b):
            messages = Storage()
            messages.token_invalid = "Token is invalid!"
            messages.token_expired = "Token is expired!"
            messages.parameter_invalid = "Parameters are invalid!"
            messages.error = "Error occur!"
            if len(a) != total or pos >= total:
                return dict(result=False, reason=0, message=messages.parameter_invalid)
            from datetime import datetime
            cur_time = datetime.today()
            token = a[pos]
            try:
                query = db(db.auth_user.token == token)
                user = query.select(db.auth_user.last_login, db.auth_user.id)
                if len(user) == 0:
                    return dict(result=False, reason=1, message=messages.token_invalid)
                elif (cur_time - user.first().last_login).total_seconds() > auth.settings.expiration:
                    return dict(result=False, reason=2, message=messages.token_expired)
                # Swap the token argument for the resolved user id before
                # invoking the action, and refresh the session timestamp.
                a = [(x if not i == pos else user.first().id) for i, x in enumerate(a)]
                db(db.auth_user.token == token).update(last_login=cur_time)
            # NOTE(review): bare except deliberately swallows all errors and
            # reports reason=-1; consider narrowing to Exception.
            except:
                import traceback
                traceback.print_exc()
                return dict(result=False, reason=-1, message=messages.error)
            return action(*a, **b)
        f.__doc__ = action.__doc__
        f.__name__ = action.__name__
        f.__dict__.update(action.__dict__)
        return f
    return decorate
auth.requires_token = __authorize_token
| [
"caotien189@gmail.com"
] | caotien189@gmail.com |
809de9e29748f706ece9fd93d10433c46825738b | 1a7ccda7c8a5daa7b4b17e0472dd463cc3227e62 | /solve_1/39.combination-sum.py | cef4667dac1d4d8adf2945ef8b1bc38e6676a449 | [] | no_license | yoshikipom/leetcode | c362e5b74cb9e21382bf4c6275cc13f7f51b2497 | 08f93287d0ef4348ebbeb32985d3a544ecfee24c | refs/heads/main | 2023-09-02T18:46:08.346013 | 2023-08-31T23:07:55 | 2023-08-31T23:07:55 | 323,344,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | #
# @lc app=leetcode id=39 lang=python3
#
# [39] Combination Sum
#
# @lc code=start
from typing import List
class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return all unique combinations of *candidates* summing to *target*.

        Each candidate may be used an unlimited number of times.  Combinations
        are emitted in the depth-first order induced by the candidate list.

        The result is accumulated in a local list instead of ``self.result``
        (the original stored state on the instance, which leaks between calls
        and is not reentrant).
        """
        result: List[List[int]] = []

        def backtrack(chosen: List[int], total: int, start: int) -> None:
            # Explore candidates from `start` onward so each combination is
            # generated exactly once (non-decreasing index order).
            if total == target:
                result.append(chosen[:])
                return
            if total > target:
                return
            for i in range(start, len(candidates)):
                num = candidates[i]
                chosen.append(num)
                backtrack(chosen, total + num, i)
                chosen.pop()

        backtrack([], 0, 0)
        return result
# @lc code=end
| [
"yoshiki.shino.tech@gmail.com"
] | yoshiki.shino.tech@gmail.com |
36534e69f03c98f3a076c28132044a865683cad9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_044/ch168_2020_06_22_14_43_26_644897.py | 5e7c3ed0f9f34c784ea44f017c85b13df31733e2 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | def login_disponivel(nome, lista):
i=1
if nome not in lista:
return nome
else:
for name in range(len(lista)):
while nome in lista:
nome = nome +str(i)
#if nome in lista:
# nome= nome[:-1]
i+=1
return nome | [
"you@example.com"
] | you@example.com |
f532912962feb01c6b4b29fddde3864c6f0ff158 | 3169b5a8191f45140eeeea5422db5ebf7b73efd3 | /Projects/BadProjects/ALL/testJoy.py | 45870a36494be3e33eb60d803660cdaf8b8730d7 | [] | no_license | ArtemZaZ/OldAllPython-projects | e56fdae544a26418414e2e8717fe3616f28d73f9 | 01dc77f0342b8f6403b9532a13f3f89cd42d2b06 | refs/heads/master | 2021-09-07T19:17:03.607785 | 2018-02-27T20:34:03 | 2018-02-27T20:34:03 | 105,284,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | import time
import RTCjoystic
J = RTCjoystic.Joystick()
J.connect("/dev/input/js0")
J.info()
time.sleep(2)
J.start()
def hand():
    # Callback invoked by the joystick library when the 'trigger' button fires.
    print("IT'S ALIVE")
J.connectButton('trigger', hand)
while(True):
print(J.Axis.get('z'))
#print(J.Buttons.get('trigger'))
time.sleep(0.1)
J.exit()
| [
"temka.911@mail.ru"
] | temka.911@mail.ru |
bb12fdf31aff0eabe212b1d61dca6979087dd933 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02690/s266356249.py | 51ac877f9ea77fc515e2789fa39600f138aeb94e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import sys
input = sys.stdin.readline
X = int(input())
A = 1
B = 0
while True:
while A**5 - B**5 < X:
B -= 1
if A**5 - B**5 == X:
print(A,B)
exit()
B = A
A += 1 | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7b481e4611b931de492ac27e94a94a9b2433a087 | cca6bcec6528417842ce4cc9aee2b891c37fa421 | /pogo/proto/Networking/Responses/EchoResponse_pb2.py | 1368215e1da1ecd57acf583e22bef01d0adfed59 | [] | no_license | p0psicles/pokemongo-api | 2c1b219dcc6441399a787280e3df9446761d2230 | c1e20ae5892b045ac0b035b0f50254d94a6ac077 | refs/heads/master | 2021-01-16T23:08:42.501756 | 2016-07-20T20:51:54 | 2016-07-20T20:51:54 | 63,850,559 | 2 | 0 | null | 2016-07-21T08:17:44 | 2016-07-21T08:17:44 | null | UTF-8 | Python | false | true | 2,129 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Networking/Responses/EchoResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='Networking/Responses/EchoResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n\'Networking/Responses/EchoResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\x1f\n\x0c\x45\x63hoResponse\x12\x0f\n\x07\x63ontext\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ECHORESPONSE = _descriptor.Descriptor(
name='EchoResponse',
full_name='POGOProtos.Networking.Responses.EchoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='context', full_name='POGOProtos.Networking.Responses.EchoResponse.context', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=107,
)
DESCRIPTOR.message_types_by_name['EchoResponse'] = _ECHORESPONSE
EchoResponse = _reflection.GeneratedProtocolMessageType('EchoResponse', (_message.Message,), dict(
DESCRIPTOR = _ECHORESPONSE,
__module__ = 'Networking.Responses.EchoResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.EchoResponse)
))
_sym_db.RegisterMessage(EchoResponse)
# @@protoc_insertion_point(module_scope)
| [
"contact@dylanmadisetti.com"
] | contact@dylanmadisetti.com |
87ecc947f22dcc7fbfb81ec478ee7e66c27ae353 | 07b7d6244732a3fd52d431e5d3d1ab7f651b4ab0 | /src/exemplos/05_EstruturasDeDados/06_Arquivo/05-texto.py | e40ece22b49f17df2ca33cb4c3c7fd5b715ffaba | [] | no_license | GTaumaturgo/CIC-APC | 19832050efe94dd29a78bde8b6e121b990ccd6b9 | cc3b7b132218855ad50ddbe63bbdd6f94f273c54 | refs/heads/master | 2020-04-05T22:54:13.414180 | 2017-01-18T17:20:25 | 2017-01-18T17:20:25 | 42,275,384 | 0 | 0 | null | 2016-04-10T14:25:16 | 2015-09-10T23:05:54 | C | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
# @package: 05-texto.py
# @author: Guilherme N. Ramos (gnramos@unb.br)
# @disciplina: Algoritmos e Programação de Computadores
#
# Exemplo de uso de arquivo texto.
if __name__ == '__main__':
arquivo = 'apc.python.txt'
try:
with open(arquivo, 'w') as f:
f.write('disse o corvo, \'Nunca mais\'.\n')
f.seek(0) # Não existe a função 'rewind', mas este é um
# comportamento equivalente
f.write('D')
except:
print('Erro!')
| [
"ramos@gnramos.com"
] | ramos@gnramos.com |
536f861ee0f71641bd33ae7c7e32c1d559773a41 | 280a9dda130e27869c5ba791e7fbf502d5ca075c | /linskel.py | c7881d32f5f45fffda182ffa1d0b35b098652ad7 | [] | no_license | clbarnes/linear_skeleton | d6a9ad9ef565251143f9e5e419544a820d9cee80 | 96599a89ddd7509029069c93983388c323e27703 | refs/heads/master | 2021-06-02T11:09:53.069883 | 2018-07-11T16:56:06 | 2018-07-11T16:56:06 | 140,604,257 | 0 | 0 | null | 2021-04-28T21:54:58 | 2018-07-11T16:49:27 | Python | UTF-8 | Python | false | false | 3,001 | py | from collections import deque
import networkx as nx
from skimage.morphology import skeletonize
import numpy as np
from scipy.ndimage.filters import convolve
kernel = 2 ** np.array([
[4, 5, 6],
[3, 0, 7],
[2, 1, 0]
])
kernel[1, 1] = 0
int_reprs = np.zeros((256, 8), dtype=np.uint8)
for i in range(255):
int_reprs[i] = [int(c) for c in np.binary_repr(i, 8)]
int_reprs *= np.array([8, 7, 6, 5, 4, 3, 2, 1], dtype=np.uint8)
neighbour_locs = np.array([
(0, 0),
(-1, -1),
(-1, 0),
(-1, 1),
(0, 1),
(1, 1),
(1, 0),
(1, -1),
(0, -1)
])
def linearise_img(bin_im):
"""
Takes a binary image, skeletonises it, returns multilinestrings present in the image.
Returns a list with one item per connected component.
Each connected component is represented by a list with one item per linestring.
Each linestring is represented by a list with one item per point.
Each point is represented by a (y, x) tuple.
i.e. to get the y coordinate of the first point in the first linestring of the first connected component, use
``result[0][0][0]``
N.B. does not close rings
:param bin_im:
:return:
"""
assert np.allclose(np.unique(bin_im), np.array([0, 1]).astype(bin_im.dtype))
skeletonized = skeletonize(bin_im.astype(np.uint8)).astype(np.uint8)
convolved = (
convolve(skeletonized, kernel, mode="constant", cval=0, origin=[0, 0]) * skeletonized
).astype(np.uint8)
ys, xs = convolved.nonzero() # n length
location_bits = int_reprs[convolved[ys, xs]] # n by 8
diffs = neighbour_locs[location_bits] # n by 8 by 2
g = nx.Graph()
for yx, this_diff in zip(zip(ys, xs), diffs):
nonself = this_diff[np.abs(this_diff).sum(axis=1) > 0]
partners = nonself + yx
for partner in partners:
g.add_edge(
yx, tuple(partner),
weight=np.linalg.norm(partner - yx)
)
msf = nx.minimum_spanning_tree(g)
paths = dict(nx.all_pairs_shortest_path(msf))
for nodes in nx.connected_components(msf):
mst = msf.subgraph(nodes)
lines = []
src, *leaves = sorted(node for node, deg in mst.degree if deg == 1)
visited = set()
for leaf in leaves:
path = paths[src][leaf]
existing_path = []
new_path = []
for item in path:
if item in visited:
existing_path.append(item)
else:
new_path.append(item)
new_path = existing_path[-1:] + new_path
lines.append(new_path)
visited.update(new_path)
yield lines
if __name__ == '__main__':
import imageio
from timeit import timeit
im = imageio.imread("img/two_lines.png", pilmode='L') // 255
n = 50
time = timeit("list(linearise_img(im))", number=n, globals=globals()) / n
coords = list(linearise_img(im))
print(time)
| [
"barnesc@janelia.hhmi.org"
] | barnesc@janelia.hhmi.org |
f3534da8e15a9fda51d59846cc158f9e04c9e5f9 | 38372fcc2ca58798176267360ff07f886400bc7b | /core_scheduled_tasks/functions.py | be85100a4bd8f11b158c46009f7b925f70c2b6a7 | [] | no_license | portman-asset-finance/_GO_PAF | 4eb22c980aae01e0ad45095eb5e55e4cb4eb5189 | ee93c49d55bb5717ff1ce73b5d2df6c8daf7678f | refs/heads/master | 2020-09-21T05:22:10.555710 | 2019-11-28T16:44:17 | 2019-11-28T16:44:17 | 224,691,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py |
from datetime import datetime, timedelta
#####################
# #
# FUNCTION SKELETON #
# #
#####################
#
# def function_name_as_in_go_scheduler_table(job_qs, job_parameters):
#
# # Step 1: Import any function-specific modules
# # --------------------------------------------
# from a_module import a_function
#
# # Step 2: Execute your function
# # -----------------------------
# a_function.a_task(**job_parameters)
#
# # Step 3: Check the next run date
# # -------------------------------
# job_qs.next_run = job_qs.next_run + timedelta(days=1)
# job_qs.save()
#
#
# * EXISTING FUNCTIONS BELOW DO NOT WORK WITHOUT DEPENDENCIES NOT PROVIDED AS PART OF THE WORK PACKAGE*
def lazybatch(job_qs, job_params):
from core_lazybatch.functions import create_batches
# Step 1: Create batches.
# =======================
create_batches(**job_params)
# Step 2: Change next run date to tomorrow.
# =========================================
job_qs.next_run = datetime.now() + timedelta(days=1)
job_qs.save()
def sagewisdom(job_qs, job_params):
from core_sage_export.models import SageBatchHeaders
from core_sage_export.functions import build_sage_transactions_from_batch
# Step 1: Pull in unprocessed Sage Batch Headers
# ==============================================
sb_recs = SageBatchHeaders.objects.filter(processed__isnull=True)
# Step 2: Process.
# ================
for sb_rec in sb_recs:
if sb_rec.batch_header:
build_sage_transactions_from_batch(sb_rec)
# Step 3: Change next run date to tomorrow.
# =========================================
# job_qs.next_run = job_qs.next_run + timedelta(minutes=10)
# job_qs.save()
def companyinspector(job_qs, job_params):
from core_companies_house.functions import Compare_Company_House_Data
# Step 1: Company information.
# ============================
Compare_Company_House_Data(**job_params)
# Step 2: Change next run date to an hour.
# ========================================
job_qs.next_run = datetime.now() + timedelta(days=1)
job_qs.save()
| [
"portman-asset-finance@outlook.com"
] | portman-asset-finance@outlook.com |
b512093d11373376c94d052c0bfe28753e2b2185 | 9ebeb33e168798d41b54a8ab474b00c160de43a2 | /orders/admin.py | df86378c01955d423140cb8c8bebc5fec8b3fc35 | [] | no_license | danielspring-crypto/tritrade | 0c1f961138b9e4892d53ece98b54094be0e4c4b9 | 6fc7c644c1657a7744703cd144be7fbb5320397c | refs/heads/master | 2022-12-04T13:21:07.761942 | 2020-08-28T00:02:36 | 2020-08-28T00:02:36 | 290,908,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | from django.contrib import admin
from .models import Order, OrderItem
import csv
import datetime
from django.http import HttpResponse
from django.urls import reverse
from django.utils.safestring import mark_safe
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ['product']
def export_to_csv(modeladmin, request, queryset):
opts = modeladmin.model._meta
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;'\
'filename={}.csv'.format(opts.verbose_name)
writer = csv.writer(response)
fields = [field for field in opts.get_fields() if not field.many_to_many\
and not field.one_to_many]
writer.writerow([field.verbose_name for field in fields])
for obj in queryset:
data_row = []
for field in fields:
value = getattr(obj, field.name)
if isinstance(value, datetime.datetime):
value = value.strftime('%d/%m/%Y')
data_row.append(value)
writer.writerow(data_row)
return response
export_to_csv.short_description = 'Export to CSV'
def order_detail(obj):
return mark_safe('<a href="{}">View</a>'.format(reverse('orders:admin_order_detail', args=[obj.id])))
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email', 'address', 'postal_code', 'city', 'paid', 'created', 'updated', order_detail]
list_filter = ['paid', 'created', 'updated']
inlines = [OrderItemInline]
actions = [export_to_csv]
| [
"you@example.com"
] | you@example.com |
d20e0e97a7ef4f224d978a799662d41217cf965c | 12b5711e8bafc24216de2fec68e0d0d8875453d6 | /app.py | aef872ec1ee72a88fdd5b2cb1e2acdf99a028989 | [
"MIT"
] | permissive | miguelgrinberg/microflack_ui | 1167888e3fd7de40150c7e48f43e550f0b9430b6 | 2a46f4b3010d80c516b3937273ac4939e03aad67 | refs/heads/master | 2023-09-01T22:28:48.945408 | 2019-10-22T09:02:48 | 2019-10-22T09:02:48 | 89,034,662 | 19 | 12 | MIT | 2022-05-25T01:35:29 | 2017-04-22T00:52:44 | JavaScript | UTF-8 | Python | false | false | 515 | py | import os
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
import config
app = Flask(__name__)
config_name = os.environ.get('FLASK_CONFIG', 'dev')
app.config.from_object(getattr(config, config_name.title() + 'Config'))
Bootstrap(app)
@app.route('/')
def index():
"""Serve client-side application."""
return render_template('index.html',
use_socketio=not app.config['NO_SOCKETIO'])
if __name__ == '__main__':
app.run() # pragma: no cover
| [
"miguel.grinberg@gmail.com"
] | miguel.grinberg@gmail.com |
32609732a13129cd185a37d0a5726b983c199eb9 | 14675f0c66fb4f4eeaa6ad1e8e691b9edf8f0bdb | /All other combo programs/List_Set_Dict_Generator_Comprehensions.py | 4ba1a3a2f0b5912a27273c61ebc118395b6c377d | [] | no_license | abhishekjoshi1991/Python_Learning | 9a94529643eac7394615289e2ecd96106e70ddb8 | a74293d0776304638b5cf976b3534481e57b17f2 | refs/heads/master | 2023-04-16T02:21:30.588052 | 2021-04-21T13:58:29 | 2021-04-21T13:58:29 | 360,176,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | #List, Set, Dict, Generator Comprehensions
#-------------------------------------------------------------
#1. List Comprehension
#-------------------------------------------------------------
'''
uses [] symbol to define
syntax: [expression for each in iterables]
syntax: [expression for each in iterables if condition]
if there is one condition it can be mentioned after for and
if there are two conditions then it has to be mentioned
before for
List comprehensions are used for creating new lists
from other iterables like tuples, strings, arrays,
lists, etc.
'''
print('\n')
print('list comprehension')
#Program-1
#Squares of numbers from 1 to 10
print([x**2 for x in range(1,11)])
#Program-2
#squares of even numbers from 1 to 10
print([x**2 for x in range(1,11) if x%2==0])
#Program-3
#squares of even numbers and cubes of odd nums from 1 to 10
print([x**2 if x%2==0 else x**3 for x in range(1,11)])
#Program-3
#to print hello world many times as per len of string
print(['hello' for x in 'ethans'])
#Program-4
#[11,33,50]-->113350
l1=[11,33,50]
print(int(''.join(str(i) for i in l1)))
#Program-5
#Program to print table of 5
print([i*5 for i in range(1,11)])
#Program-6
#Nested list comprehension
print([[j for j in range(1,5)] for i in range(0,3)])
#Program-7
#list comprehension with lambda
#to display table of 6
print(list(map(lambda x:x*6,[x for x in range(1,11)])))
#Program-8
#Reverse each string in tuple
print([i[::-1] for i in ('Geeks', 'for', 'Geeks')])
#-------------------------------------------------------------
#2. Set Comprehension
#-------------------------------------------------------------
'''
gives u ique elements and uses {} brackets
'''
print('\n')
print('set comprehension')
l1=[1,2,3,2,2,3,5,3,5,3,5]
print({x**2 for x in l1})
#-------------------------------------------------------------
#3. generator Comprehension
#-------------------------------------------------------------
'''
uses () brackets.
it throws the object at o/p, data can be genereated through
it whenever required by iterating over it or type cast it
into suitable data type
'''
print('\n')
print('generator comprehension')
print((x**2 for x in range(1,10)))#throws object
a=(x**2 for x in range(1,10))
print(list(a))
#-------------------------------------------------------------
#4. dict Comprehension
#-------------------------------------------------------------
'''
it also uses {} brackets but it contains two expression
one for key and other for value.
two expressions are seperated by colon:
syntax:{expression1:expression2 for each in iterable}
'''
print('\n')
print('dict comprehension')
print({x:x**2 for x in range(1,11)})
#program to print {1:'A',2:'B'...}
print({x:chr(x+64) for x in range(1,27)})
#Program to inverse the given dict
d1={1:'A',2:'B',3:'C',4:'D'}
print({y:x for x,y in d1.items()})
print({d1[i]:i for i in d1})
#Program to find occurances of elements from list
l=[1,2,2,3,4,2,2,3,3,4,4,5,6]
print({i:l.count(i) for i in l})#as duplicated keys can not be present
#optimized program of above
print({i:l.count(i) for i in set(l)})
| [
"abhijsh61@gmail.com"
] | abhijsh61@gmail.com |
a83a6406c180911bbf530d33d506e5cfbe3c240b | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/greedy/1383_Maximum_Performance_of_a_Team.py | 2a07bfc817f1708de9c9eecd51851bd7153eec00 | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | import heapq
from typing import List
class Solution:
def maxPerformance(self, n: int, speed: List[int], efficiency: List[int], k: int) -> int:
ef_sp = zip(efficiency, speed)
# it is important step. We will have sorted by efficiency in Desc order array
ef_sp = sorted(ef_sp, key=lambda x: x[0], reverse=True)
print(ef_sp)
speed_heap = []
perf = 0
sum_speed = 0
for e, s in ef_sp:
# since we first check and only then add to the queue, we use k-1 here
# once we have a team of k members, before adding a new member
if len(speed_heap) > k - 1:
# we extract the member with the lowest speed
sum_speed -= heapq.heappop(speed_heap)
heapq.heappush(speed_heap, s)
sum_speed += s
perf = max(perf, sum_speed * e)
return perf % (10 ** 9 + 7)
if __name__ == '__main__':
s = Solution()
s.maxPerformance(6,[2,10,3,1,5,8],[5,4,3,9,7,2],2) | [
"ar.smglln@gmail.com"
] | ar.smglln@gmail.com |
6135c68dd55c44f72e47a858216e8c329d8d7419 | 5d28c38dfdd185875ba0edaf77281e684c81da0c | /mlflow/pipelines/__init__.py | 303c3ad04f3f5bd62219cd57ad2a4e454ab9bc32 | [
"Apache-2.0"
] | permissive | imrehg/mlflow | 3a68acc1730b3ee6326c1366760d6ddc7e66099c | 5ddfe9a1b48e065540094d83125040d3273c48fa | refs/heads/master | 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 | Apache-2.0 | 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null | UTF-8 | Python | false | false | 1,738 | py | # pylint: disable=line-too-long
"""
MLflow Pipelines is an opinionated framework for structuring MLOps workflows that simplifies and
standardizes machine learning application development and productionization. MLflow Pipelines
makes it easy for data scientists to follow best practices for creating production-ready ML
deliverables, allowing them to focus on developing excellent models. MLflow Pipelines also enables
ML engineers and DevOps teams to seamlessly deploy these models to production and incorporate them
into applications.
MLflow Pipelines provides production-quality :ref:`Pipeline Templates <pipeline-templates>` for
common ML problem types, such as regression & classification, and MLOps tasks, such as batch
scoring. Pipelines are structured as git repositories with YAML-based configuration files and
Python code, offering developers a declarative approach to ML application development that reduces
boilerplate.
MLflow Pipelines also implements a cache-aware executor for pipeline steps, ensuring that steps
are only executed when associated
:py:ref:`code or configurations <pipeline-repositories-key-concept>` have changed. This enables
data scientists, ML engineers, and DevOps teams to iterate very quickly within their domains of
expertise. MLflow offers |run() APIs| for executing pipelines, as well as an
|mlflow pipelines run CLI|.
For more information, see the :ref:`MLflow Pipelines Overview <pipelines>`.
.. |mlflow pipelines run CLI| replace:: :ref:`mlflow pipelines run <cli>` CLI
.. |run() APIs| replace:: :py:func:`run() <mlflow.pipelines.regression.v1.pipeline.RegressionPipeline.run>` APIs
"""
# pylint: enable=line-too-long
from mlflow.pipelines.pipeline import Pipeline
__all__ = ["Pipeline"]
| [
"noreply@github.com"
] | imrehg.noreply@github.com |
79d103fb39c6bbed9c3fefba0bd1f83375a6608c | 8f24e443e42315a81028b648e753c50967c51c78 | /python/ray/tune/config_parser.py | bae26454b0392acb0dcec18d469fc0de2144e326 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 205 | py | from ray.tune._structure_refactor import warn_structure_refactor
from ray.tune.experiment.config_parser import * # noqa: F401, F403
warn_structure_refactor(__name__, "ray.tune.experiment.config_parser")
| [
"noreply@github.com"
] | simon-mo.noreply@github.com |
c89ad2f2c715d426be96676525cbe2cbbe7e083d | 6622c0cd289ec73078d5cf1cb88d9246160087ef | /src/day12二叉树问题/test.py | bfff24930aaced0e0f2c8f870420955dd998217d | [] | no_license | chifeng111/python_demo | 366540e8b284b4d3f2ac2377a9187a4be45192b5 | af3404935aa7148b7eb41e63b5bb782d5995e01b | refs/heads/master | 2021-01-02T08:53:53.033682 | 2017-10-14T12:34:22 | 2017-10-14T12:34:22 | 98,880,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | # coding: utf-8
'''
{0x00, 0x01},
{0x02, 0x03},
{0x03, 0x04},
{0x05, 0x06}
'''
import sys
def AddDependency(m1, m2, _map):
if m1 not in _map:
_map[m1] = [m2]
else:
_map[m1].append(m2)
if m2 not in _map:
_map[m2] = []
def ModulesCycleDependency(m, _map):
d = _map[m]
if not d:
return False
if m in d:
return True
while d:
v = d.pop()
if ModulesCycleDependency(v, _map):
return True
return False
def myprint(dependencyMap):
k = dependencyMap.keys()
k = list(k)
for i in range(len(k) - 1):
if dependencyMap[k[i]]:
print("{" + "{}, {}".format(k[i], 'true') + "},")
else:
print("{" + "{}, {}".format(k[i], 'false') + "},")
if dependencyMap[k[len(k) - 1]]:
print("{" + "{}, {}".format(k[len(k) - 1], 'true') + "}")
else:
print("{" + "{}, {}".format(k[len(k) - 1], 'false') + "}")
if __name__ == '__main__':
_map = {}
while True:
s = sys.stdin.readline().strip()
if s[-1] != ",":
m1, m2 = s[1:-1].split(",")[0], s[1:-1].split(",")[1]
m2 = m2[1:]
AddDependency(m1, m2, _map)
break
else:
m1, m2 = s[1:-2].split(",")[0], s[1:-2].split(",")[1]
m2 = m2[1:]
AddDependency(m1, m2, _map)
dependencyMap = {}
for i in _map.keys():
if ModulesCycleDependency(i, _map):
dependencyMap[i] = True
else:
dependencyMap[i] = False
myprint(dependencyMap)
| [
"liaozhenhua1129@gmail.com"
] | liaozhenhua1129@gmail.com |
7070b76cab38dbd62ff206a9f2c312c8e9a3b96e | 4266e9b1c59ddef83eede23e0fcbd6e09e0fa5cb | /vs/gyp/test/mac/gyptest-app-error.py | 8371bb26870f6403beff80717b411c765231fd6d | [
"BSD-3-Clause"
] | permissive | barrystudy/study | b3ba6ed652d1a0bcf8c2e88a2a693fa5f6bf2115 | 96f6bb98966d3633b47aaf8e533cd36af253989f | refs/heads/master | 2020-12-24T14:53:06.219236 | 2017-10-23T02:22:28 | 2017-10-23T02:22:28 | 41,944,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that invalid strings files cause the build to fail.
"""
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
expected_error = 'Old-style plist parser: missing semicolon in dictionary'
saw_expected_error = [False] # Python2 has no "nonlocal" keyword.
def match(a, b):
if a == b:
return True
if not TestCmd.is_List(a):
a = a.split('\n')
if not TestCmd.is_List(b):
b = b.split('\n')
if expected_error in '\n'.join(a) + '\n'.join(b):
saw_expected_error[0] = True
return True
return False
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)
test.run_gyp('test-error.gyp', chdir='app-bundle')
test.build('test-error.gyp', test.ALL, chdir='app-bundle')
# Ninja pipes stderr of subprocesses to stdout.
if test.format == 'ninja' and expected_error in test.stdout():
saw_expected_error[0] = True
if saw_expected_error[0]:
test.pass_test()
else:
test.fail_test()
| [
"2935973620@qq.com"
] | 2935973620@qq.com |
aa95213f5dfd8f12244b3ad4c0357b0ec1ab1210 | 90673e9b40a95a4f33b22172339cc67fd7b3cc1d | /boostedhiggs/nanoevents.py | b4050b4b3ea4f9f08587b02c90a36ec6ce6975b4 | [] | no_license | SangeonPark/boostedhiggs | f2b86bb9724e2c188692a9a319cc6ea16f2a78fd | bb2f0f6c111dd67a3aa2af215e8fe412cff71548 | refs/heads/master | 2023-04-11T00:59:23.618041 | 2019-12-11T23:16:37 | 2019-12-11T23:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,595 | py | import numpy as np
import awkward as ak
from .methods import (
METVector,
LorentzVector,
Candidate,
Electron,
Muon,
Photon,
Tau,
)
def _mixin(methods, awkwardtype):
'''Like ak.Methods.mixin but also captures methods in dir() and propagate docstr'''
newtype = type(methods.__name__ + 'Array', (methods, awkwardtype), {})
newtype.__dir__ = lambda self: dir(methods) + awkwardtype.__dir__(self)
newtype.__doc__ = methods.__doc__
return newtype
class NanoCollection(ak.VirtualArray):
@classmethod
def _lazyflatten(cls, array):
return array.array.content
@classmethod
def from_arrays(cls, arrays, name, methods=None):
'''
arrays : object
An object with attributes: columns, __len__, and __getitem__
where the latter returns virtual arrays or virtual jagged arrays
'''
jagged = 'n' + name in arrays.columns
columns = {k[len(name) + 1:]: arrays[k] for k in arrays.columns if k.startswith(name + '_')}
if len(columns) == 0:
# single-item collection, just forward lazy array (possibly jagged)
if name not in arrays.columns:
raise RuntimeError('Could not find collection %s in dataframe' % name)
if methods:
ArrayType = _mixin(methods, type(arrays[name]))
return ArrayType(arrays[name])
return arrays[name]
elif not jagged:
if methods is None:
Table = ak.Table
else:
Table = _mixin(methods, ak.Table)
table = Table.named(name)
for k, v in columns.items():
table[k] = v
return table
else: # jagged
if methods:
cls = _mixin(methods, cls)
tabletype = ak.type.TableType()
for k, array in columns.items():
tabletype[k] = array.type.to.to
counts = arrays['n' + name]
out = cls(
cls._lazyjagged,
(name, counts, columns, methods),
type=ak.type.ArrayType(len(arrays), float('inf'), tabletype),
)
out.__doc__ = counts.__doc__
return out
@classmethod
def _lazyjagged(cls, name, counts, columns, methods=None):
offsets = ak.JaggedArray.counts2offsets(counts.array)
if methods is None:
JaggedArray = ak.JaggedArray
Table = ak.Table
else:
JaggedArray = _mixin(methods, ak.JaggedArray)
Table = _mixin(methods, ak.Table)
table = Table.named(name)
for k, v in columns.items():
if not isinstance(v, ak.VirtualArray):
raise RuntimeError
col = type(v)(NanoCollection._lazyflatten, (v,), type=ak.type.ArrayType(offsets[-1], v.type.to.to))
col.__doc__ = v.__doc__
table[k] = col
out = JaggedArray.fromoffsets(offsets, table)
out.__doc__ = counts.__doc__
return out
def _lazyindexed(self, indices, destination):
if not isinstance(destination.array, ak.JaggedArray):
raise RuntimeError
if not isinstance(self.array, ak.JaggedArray):
raise NotImplementedError
content = np.zeros(len(self.array.content) * len(indices), dtype=ak.JaggedArray.INDEXTYPE)
for i, k in enumerate(indices):
content[i::len(indices)] = np.array(self.array.content[k])
globalindices = ak.JaggedArray.fromoffsets(
self.array.offsets,
content=ak.JaggedArray.fromoffsets(
np.arange((len(self.array.content) + 1) * len(indices), step=len(indices)),
content,
)
)
globalindices = globalindices[globalindices >= 0] + destination.array.starts
out = globalindices.copy(
content=type(destination.array).fromoffsets(
globalindices.content.offsets,
content=destination.array.content[globalindices.flatten().flatten()]
)
)
return out
def __setitem__(self, key, value):
if self.ismaterialized:
super(NanoCollection, self).__setitem__(key, value)
_, _, columns, _ = self._args
columns[key] = value
self._type.to.to[key] = value.type.to.to
def __delitem__(self, key):
if self.ismaterialized:
super(NanoCollection, self).__delitem__(key)
_, _, columns, _ = self._args
del columns[key]
del self._type.to.to[key]
class NanoEvents(ak.Table):
collection_methods = {
'CaloMET': METVector,
'ChsMET': METVector,
'GenMET': METVector,
'MET': METVector,
'METFixEE2017': METVector,
'PuppiMET': METVector,
'RawMET': METVector,
'TkMET': METVector,
# pseudo-lorentz: pt, eta, phi, mass=0
'IsoTrack': LorentzVector,
'SoftActivityJet': LorentzVector,
'TrigObj': LorentzVector,
# True lorentz: pt, eta, phi, mass
'FatJet': LorentzVector,
'GenDressedLepton': LorentzVector,
'GenJet': LorentzVector,
'GenJetAK8': LorentzVector,
'GenPart': LorentzVector,
'Jet': LorentzVector,
'LHEPart': LorentzVector,
'SV': LorentzVector,
'SubGenJetAK8': LorentzVector,
'SubJet': LorentzVector,
# Candidate: LorentzVector + charge
'Electron': Electron,
'Muon': Muon,
'Photon': Photon,
'Tau': Tau,
'GenVisTau': Candidate,
}
@classmethod
def from_arrays(cls, arrays, collection_methods_overrides={}):
events = cls.named('event')
collections = {k.split('_')[0] for k in arrays.columns}
collections -= {k for k in collections if k.startswith('n') and k[1:] in collections}
allmethods = {**cls.collection_methods, **collection_methods_overrides}
for name in collections:
methods = allmethods.get(name, None)
events[name] = NanoCollection.from_arrays(arrays, name, methods)
# finalize
del events.Photon['mass']
embedded_subjets = type(events.SubJet)(
events.FatJet._lazyindexed,
args=(['subJetIdx1', 'subJetIdx2'], events.SubJet),
type=ak.type.ArrayType(len(events), float('inf'), float('inf'), events.SubJet.type.to.to),
)
embedded_subjets.__doc__ = events.SubJet.__doc__
events.FatJet['subjets'] = embedded_subjets
return events
| [
"nick.smith@cern.ch"
] | nick.smith@cern.ch |
8b199a38bfca4e56b6fc689c0255d55c4c2c5db7 | 51fd9e45e48bd1cea58207f6d3d472e83b419194 | /src/scripts/2diff.py | 7f32a67190a46683b63c2c03df8a100caa0bbc3e | [
"Apache-2.0"
] | permissive | helioxgroup/deepspeech-reconstruction | 6076f4405dd1287723436b558c694f5ece415179 | 72f28d1e9064d221b3421c302a8725a8c71859ee | refs/heads/main | 2023-04-04T07:25:20.488237 | 2021-04-15T21:24:33 | 2021-04-15T21:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | # Test 2nd derivative computation
import numpy as np
import tensorflow as tf
np.random.seed(0)
# loss = 'ctc'
loss = 'ce'
bs = 32
fdim = 26
ilen = 100
olen = 10
nlabels = 28
x = tf.Variable(np.random.rand(bs, ilen, fdim), dtype=tf.float32)
W = tf.Variable(np.random.rand(fdim, nlabels), dtype=tf.float32)
with tf.GradientTape() as g1:
with tf.GradientTape() as g2:
logits = tf.linalg.matmul(x, W)
if loss == 'ctc':
logits = tf.transpose(logits, [1, 0, 2])
y = tf.Variable(np.random.randint(0, nlabels, (bs, olen)))
loss = tf.reduce_mean(tf.nn.ctc_loss(y, logits, [olen] * bs, [ilen] * bs))
elif loss == 'ce':
y = tf.Variable(np.random.rand(bs, ilen, nlabels), dtype=tf.float32)
loss = tf.nn.log_poisson_loss(y, logits)
g2.watch(W)
dl_dW = g2.gradient(loss, W)
d = tf.linalg.norm(dl_dW)
dd_dx = g1.gradient(d, x)
print(dd_dx) | [
"trungv.dang@outlook.com"
] | trungv.dang@outlook.com |
4fbeac8a60c377c4ab8bb5b1c063ee1960165f4b | ef1458fae5fbd6b7a9281ccd4d9bc8289f3dd38b | /tests/test_samplestats.py | 17c502dcdc229ebcbd94c60f9ab5f831a554c7ce | [
"BSD-3-Clause"
] | permissive | vt100/mystic | a42910537c3de90d1c2a5637bad5d866308e8863 | 7589eee4b9a7cb6056114ee6770579d173d9007b | refs/heads/master | 2021-01-17T22:28:57.743493 | 2015-07-17T15:25:35 | 2015-07-17T15:25:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,392 | py | import numpy as np
from mystic.math.measures import _k
if __name__ == '__main__':
# even-length
w = [3,1,1,1,3,3]
assert _k(w) == w
# even-length clipping
assert (np.array(_k(w,(10,10),clip=True)) > 0).tolist() == [1,1,1,1,1,1]
assert (np.array(_k(w,(25,25),clip=True)) > 0).tolist() == [0,1,1,1,1,0]
assert (np.array(_k(w,(50,50),clip=True)) > 0).tolist() == [0,0,0,1,1,0]
assert (np.array(_k(w,(49,50),clip=True)) > 0).tolist() == [0,0,0,1,0,0]
assert (np.array(_k(w,(50,49),clip=True)) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(49,49),clip=True)) > 0).tolist() == [0,0,0,1,1,0]
assert (np.array(_k(w,(25,75),clip=True)) > 0).tolist() == [1,1,0,0,0,0]
assert (np.array(_k(w,(24,75),clip=True)) > 0).tolist() == [1,0,0,0,0,0]
assert (np.array(_k(w,(25,74),clip=True)) > 0).tolist() == [0,1,0,0,0,0]
assert (np.array(_k(w,(24,74),clip=True)) > 0).tolist() == [1,1,0,0,0,0]
assert (np.array(_k(w,(75,25),clip=True)) > 0).tolist() == [0,0,0,0,1,1]
assert (np.array(_k(w,(74,25),clip=True)) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(75,24),clip=True)) > 0).tolist() == [0,0,0,0,0,1]
assert (np.array(_k(w,(74,24),clip=True)) > 0).tolist() == [0,0,0,0,1,1]
# even-length trimming
assert (np.array(_k(w,(10,10))) > 0).tolist() == [1,1,1,1,1,1]
assert (np.array(_k(w,(25,25))) > 0).tolist() == [0,1,1,1,1,0]
assert (np.array(_k(w,(50,50))) > 0).tolist() == [0,0,0,0,0,0]
assert (np.array(_k(w,(49,50))) > 0).tolist() == [0,0,0,1,0,0]
assert (np.array(_k(w,(50,49))) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(49,49))) > 0).tolist() == [0,0,0,1,1,0]
assert (np.array(_k(w,(25,75))) > 0).tolist() == [0,0,0,0,0,0]
assert (np.array(_k(w,(24,75))) > 0).tolist() == [1,0,0,0,0,0]
assert (np.array(_k(w,(25,74))) > 0).tolist() == [0,1,0,0,0,0]
assert (np.array(_k(w,(24,74))) > 0).tolist() == [1,1,0,0,0,0]
assert (np.array(_k(w,(75,25))) > 0).tolist() == [0,0,0,0,0,0]
assert (np.array(_k(w,(74,25))) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(75,24))) > 0).tolist() == [0,0,0,0,0,1]
assert (np.array(_k(w,(74,24))) > 0).tolist() == [0,0,0,0,1,1]
# odd-length
w = [4,2,4,2,4]
assert _k(w) == w
# odd-length clipping
assert (np.array(_k(w,(10,10),clip=True)) > 0).tolist() == [1,1,1,1,1]
assert (np.array(_k(w,(25,25),clip=True)) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(50,50),clip=True)) > 0).tolist() == [0,0,1,0,0]
assert (np.array(_k(w,(37.5,37.5),clip=True)) > 0).tolist() == [0,0,1,0,0]
assert (np.array(_k(w,(37.4,37.5),clip=True)) > 0).tolist() == [0,1,1,0,0]
assert (np.array(_k(w,(37.5,37.4),clip=True)) > 0).tolist() == [0,0,1,1,0]
assert (np.array(_k(w,(37.4,37.4),clip=True)) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(25,75),clip=True)) > 0).tolist() == [1,1,0,0,0]
assert (np.array(_k(w,(24,75),clip=True)) > 0).tolist() == [1,0,0,0,0]
assert (np.array(_k(w,(25,74),clip=True)) > 0).tolist() == [0,1,0,0,0]
assert (np.array(_k(w,(24,74),clip=True)) > 0).tolist() == [1,1,0,0,0]
assert (np.array(_k(w,(75,25),clip=True)) > 0).tolist() == [0,0,0,1,1]
assert (np.array(_k(w,(74,25),clip=True)) > 0).tolist() == [0,0,0,1,0]
assert (np.array(_k(w,(75,24),clip=True)) > 0).tolist() == [0,0,0,0,1]
# odd-length trimming
assert (np.array(_k(w,(10,10))) > 0).tolist() == [1,1,1,1,1]
assert (np.array(_k(w,(25,25))) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(50,50))) > 0).tolist() == [0,0,0,0,0]
assert (np.array(_k(w,(37.5,37.5))) > 0).tolist() == [0,0,1,0,0]
assert (np.array(_k(w,(37.4,37.5))) > 0).tolist() == [0,1,1,0,0]
assert (np.array(_k(w,(37.5,37.4))) > 0).tolist() == [0,0,1,1,0]
assert (np.array(_k(w,(37.4,37.4))) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(25,75))) > 0).tolist() == [0,0,0,0,0]
assert (np.array(_k(w,(24,75))) > 0).tolist() == [1,0,0,0,0]
assert (np.array(_k(w,(25,74))) > 0).tolist() == [0,1,0,0,0]
assert (np.array(_k(w,(24,74))) > 0).tolist() == [1,1,0,0,0]
assert (np.array(_k(w,(75,25))) > 0).tolist() == [0,0,0,0,0]
assert (np.array(_k(w,(74,25))) > 0).tolist() == [0,0,0,1,0]
assert (np.array(_k(w,(75,24))) > 0).tolist() == [0,0,0,0,1]
assert (np.array(_k(w,(74,24))) > 0).tolist() == [0,0,0,1,1]
# EOF
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
c209285b831f71b03207b5130742b461b5e9cdad | 8e3eb5fa2cf80e2f6d265faaa410cf850ca01242 | /화물 도크.py | 7156bac8cd7dcaf79511872ede0783776c203861 | [] | no_license | first0506/Algorithm-Problem-Solving | 7a35f5fc1ea5dc0c06e3fc4b96abcbaf85fd13b1 | 4ef67297ead3eba0711de0f49b8c099ffaa29bf8 | refs/heads/master | 2022-11-06T17:18:16.360292 | 2020-07-06T11:38:12 | 2020-07-06T11:38:12 | 263,899,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | T = int(input())
for test_case in range(1, T+1):
N = int(input())
times = []
for _ in range(N):
times.append(list(map(int, input().split())))
times = sorted(times, key=lambda x:x[1])
end = times[0][0]
cnt = 1
for i in range(1, N):
if end <= times[i][0]:
end = times[i][1]
cnt += 1
print('#{} {}'.format(test_case, cnt)) | [
"first0506@naver.com"
] | first0506@naver.com |
25f627eefb8b0a812a8709085b737eedc77257f3 | 65bea3dc57eb4d6bc27fe53418c8c4bbcd8f0ca3 | /profiles/migrations/0001_initial.py | d6fc76c35694baff5230c27308233e5bcdbdfac3 | [
"MIT"
] | permissive | ezekieltech/eduTech-backend | e339e1eae12529ae414a9220b67b428afdaa057f | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | refs/heads/main | 2023-03-25T22:31:01.704584 | 2021-03-21T04:12:31 | 2021-03-21T04:12:31 | 326,237,291 | 0 | 0 | MIT | 2021-03-17T16:38:54 | 2021-01-02T17:39:46 | Python | UTF-8 | Python | false | false | 2,087 | py | # Generated by Django 3.1.5 on 2021-03-13 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='EduconsultantProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('bio', models.TextField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MenteeProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('bio', models.TextField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MentorProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('bio', models.TextField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'abstract': False,
},
),
]
| [
"ezekielobhafuoso@gmail.com"
] | ezekielobhafuoso@gmail.com |
8df77a27d2b747cb1e1fafc772f2d5e5fad088d6 | 5ae615019b126421a9ccd66fd6c9052af9a27923 | /opem/Test/test_Functions.py | b7aae03591b49561955dd7de66798d7a99167193 | [
"MIT"
] | permissive | guoyanyanyun/opem | 62b401f12d990309b19b08f0637782dd408accce | 02e946397f132802b27a9384d7ff7ba4a7fca580 | refs/heads/master | 2020-04-09T11:12:00.413549 | 2018-11-12T09:44:14 | 2018-11-12T09:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | # -*- coding: utf-8 -*-
'''
>>> from opem.Functions import *
>>> data=[i for i in range(100)]
>>> integrate(data,1)
4867.666666666666
>>> data[0]=None
>>> integrate(data,1)
>>> linear_plot([1,2,3],[2,4,6])
[[2.0, 4.0, 6.0], 0.0, 2.0]
>>> isfloat("2")
True
>>> isfloat("2.02")
True
>>> isfloat('ss')
False
>>> filter_lambda({"lambda":24})
[Warning] Opem Automatically Set Lambda To Maximum Value (23)
{'lambda': 23}
>>> filter_alpha({"alpha":2})
[Warning] Opem Automatically Set Alpha To Maximum Value (1)
{'alpha': 1}
>>> filter_lambda({"lambda":13})
[Warning] Opem Automatically Set Lambda To Minimum Value (14)
{'lambda': 23}
>>> filter_alpha({"alpha":-0.1})
[Warning] Opem Automatically Set Alpha To Maximum Value (0)
{'alpha': 0}
>>> Input_dict=Get_Input({"T": "Cell Operation Temperature [K]", "PH2": "Partial Pressure [atm]", "PO2": "Partial Pressure [atm]"},input_item=input_test)
>>> Input_keys=list(Input_dict.keys())
>>> Input_keys.sort()
>>> print(Input_keys)
['Name', 'PH2', 'PO2', 'T']
>>> description_print("Model1",{"Model1":"Test"})
###########
<BLANKLINE>
<BLANKLINE>
Test
<BLANKLINE>
<BLANKLINE>
###########
>>> check_update(1)
>>> check_update(0.1)
###########
New Version (0.9) Is Available!
Website : http://www.ecsim.ir/opem
###########
'''
| [
"sepand.haghighi@yahoo.com"
] | sepand.haghighi@yahoo.com |
c947f71d2d1777f4abb4dfb167d066818ab7a3ff | 09120532659f7eb134163f92ac2f65423a04dc03 | /zproject/django/survey/teacher/migrations/0001_initial.py | 2b17accc1d2d1a6230850e396f2749e5e35e5155 | [] | no_license | hoboland21/survey | 7b2dafd76db0e9317037a0cec163a97c0ec9a8ec | 93e71f3304b381a6be03c8f813d2ba3a0b6eb218 | refs/heads/master | 2023-01-28T07:38:38.934710 | 2019-05-13T08:55:41 | 2019-05-13T08:55:41 | 182,874,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,853 | py | # Generated by Django 2.2 on 2019-04-30 06:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImportFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=256)),
('document', models.FileField(upload_to='uploads/')),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('format', models.CharField(blank=True, max_length=30)),
('question', models.CharField(max_length=2048)),
('label', models.CharField(blank=True, max_length=120)),
('group', models.CharField(blank=True, max_length=120)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Survey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('requester', models.CharField(max_length=256)),
('description', models.CharField(max_length=1024)),
('subject', models.CharField(max_length=256)),
('label', models.CharField(blank=True, max_length=120)),
('group', models.CharField(blank=True, max_length=120)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.CharField(blank=True, max_length=120)),
('label', models.CharField(blank=True, max_length=120)),
('test_code', models.CharField(max_length=32)),
('name', models.CharField(max_length=128)),
('created', models.DateTimeField(auto_now=True)),
('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Survey')),
],
),
migrations.CreateModel(
name='Items',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(blank=True, max_length=120)),
('group', models.CharField(blank=True, max_length=120)),
('sequence', models.SmallIntegerField()),
('page', models.SmallIntegerField(default=1)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Question')),
('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Survey')),
],
),
migrations.CreateModel(
name='Answers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.CharField(max_length=10)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Question')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Student')),
],
),
]
| [
"jc@saipantech.com"
] | jc@saipantech.com |
e70928a610cf94f7cb28e1ce8d31ce8d6dd954ef | df42b0d05038a2940606591d548637bc51d6573d | /mounth02/day11/demo02.py | fed207512a913987794058f8243ffb92c9eece52 | [] | no_license | ThreePointFive/aid1907_0814 | 440113f5ae2df28e53a088bd3ea420d5558214b4 | 99eea9aafdf8211278425c33aba2e64d5eb2500b | refs/heads/master | 2022-12-03T19:31:21.085608 | 2019-11-09T06:25:33 | 2019-11-09T06:25:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | '''property
属性来控制输出
'''
class Wife:
def __init__(self,name,age):
self.name = name
self.age=age
def get_age(self):
return self.age
def set_age(self,age):
self.age=age
property=(get_age,set_age)
w01=Wife('caster',25)
w01.set_age(268)
print(w01.age)
| [
"760373741@qq.com"
] | 760373741@qq.com |
3d817ef9508b53859f184549c58777d00d0ecbf7 | 4b7db29ef0eede67efbb55baf0c300a7003b8310 | /Section 3/Designer_code/Video2_First_Design.py | 3133f9c7a8794e9b105a7227870409ddb1e4467b | [
"MIT"
] | permissive | PacktPublishing/-Hands-on-Python-3.x-GUI-Programming | 40ffc8b37180eb9d16e5516668efa9309f9e67b2 | 2506987b026bf30c7f9d53672755b0a22fce3379 | refs/heads/master | 2021-06-20T02:07:36.057694 | 2021-01-18T09:48:49 | 2021-01-18T09:48:49 | 174,128,053 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Video2_First_Design.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(400, 300)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 400, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionNew = QtWidgets.QAction(MainWindow)
self.actionNew.setObjectName("actionNew")
self.menuFile.addAction(self.actionNew)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
MainWindow.setStatusTip(_translate("MainWindow", "This is the status bar"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.actionNew.setText(_translate("MainWindow", "New"))
self.actionNew.setStatusTip(_translate("MainWindow", "New File"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"meghb@packtpub.com"
] | meghb@packtpub.com |
d8b2259a4784af00b7eb6df6d3392ff14912b084 | a34f36f2f08791d353b63e786fa99fe7e7c65d9f | /1271A.py | 257db5f672839f50487f90a6efe3b20aaeba7768 | [] | no_license | vijay9908/code_forces | 5f758c4417d448fb2637dd4b896dfc59409f8b97 | 7d58e52aabea612dfed52dd3534e38563bf78633 | refs/heads/master | 2021-06-25T11:54:55.179108 | 2020-11-19T15:24:08 | 2020-11-19T15:24:08 | 173,603,181 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | a = int(input())
b = int(input())
c = int(input())
d = int(input())
e = int(input())
f = int(input())
x=min(a,d)
awd=a-x
dwd=d-x
y=min(dwd,b,c)
z=min(d,b,c)
d2=d-z
g=min(d2,a)
first=x*e+y*f
second=z*f+g*e
print(max(first,second))
| [
"vijaytanmay055@gmail.com"
] | vijaytanmay055@gmail.com |
bff1c589f0daa9c4c1748c6ff163db8a770639fe | f7ec01cc0419fa38639a8f4514aeb288bf70e8d5 | /project/object_detection/yolo_v3/utils/IOU.py | 4b0028f66b364588e3126cbaebb70bbe9e3e6dab | [] | no_license | lovejing0306/TensorFlow | dd10e58734603cb0f22c4adf32d849a4cfb08dcd | d4d4aec4d086ab916ffb1db7f992edd1b1a31eb4 | refs/heads/master | 2021-06-18T23:48:43.995706 | 2019-05-08T06:19:48 | 2019-05-08T06:19:48 | 92,510,824 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | import numpy as np
import tensorflow as tf
def calculate_min(point, data):
min_point = point - data
return min_point
def calculate_max(point, data):
max_point = point + data
return max_point
def IOU_calculator(x, y, width, height, l_x, l_y, l_width, l_height):
'''
Cculate IOU
:param x: net predicted x
:param y: net predicted y
:param width: net predicted width
:param height: net predicted height
:param l_x: label x
:param l_y: label y
:param l_width: label width
:param l_height: label height
:return: IOU
'''
x_max = calculate_max(x, width / 2)
y_max = calculate_max(y, height / 2)
x_min = calculate_min(x, width / 2)
y_min = calculate_min(y, height / 2)
l_x_max = calculate_max(l_x, width / 2)
l_y_max = calculate_max(l_y, height / 2)
l_x_min = calculate_min(l_x, width / 2)
l_y_min = calculate_min(l_y, height / 2)
'''--------Caculate Both Area's point--------'''
xend = tf.minimum(x_max, l_x_max)
xstart = tf.maximum(x_min, l_x_min)
yend = tf.minimum(y_max, l_y_max)
ystart = tf.maximum(y_min, l_y_min)
area_width = xend - xstart
area_height = yend - ystart
'''--------Caculate the IOU--------'''
area = area_width * area_height
all_area = tf.cond((width * height + l_width * l_height - area) <= 0, lambda: tf.cast(1e-8, tf.float32),
lambda: (width * height + l_width * l_height - area))
IOU = area / all_area
IOU = tf.cond(area_width < 0, lambda: tf.cast(1e-8, tf.float32), lambda: IOU)
IOU = tf.cond(area_height < 0, lambda: tf.cast(1e-8, tf.float32), lambda: IOU)
return IOU
'''--------Test the IOU function--------'''
if __name__ == '__main__':
IOU1 = IOU_calculator(tf.cast(1, tf.float32), tf.cast(1, tf.float32), tf.cast(2, tf.float32),
tf.cast(2, tf.float32),
tf.cast(2, tf.float32), tf.cast(2, tf.float32), tf.cast(2, tf.float32),
tf.cast(2, tf.float32))
IOU = IOU_calculator(tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32),
tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32))
sess = tf.Session()
print(sess.run(IOU))
| [
"lovejing0306@gmail.com"
] | lovejing0306@gmail.com |
8316dd070bfe50ffe819d3cb3362e71859ec6e89 | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/PIL-1.1.6-py2.7-linux-i686.egg/EGG-INFO/scripts/pilprint.py | 434bed3e44cd2b2c91d8c55fefa724a1bc9c6417 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | #!/home/ignacio/plone-virtualenv/bin/python
#
# The Python Imaging Library.
# $Id: pilprint.py,v 1.1.1.1 2007/09/26 00:00:36 chrism Exp $
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
VERSION = "pilprint 0.3/2003-05-05"
import Image
import PSDraw
letter = ( 1.0*72, 1.0*72, 7.5*72, 10.0*72 )
def description(file, image):
import os
title = os.path.splitext(os.path.split(file)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
import getopt, os, sys
if len(sys.argv) == 1:
print "PIL Print 0.2a1/96-10-04 -- print image files"
print "Usage: pilprint files..."
print "Options:"
print " -c colour printer (default is monochrome)"
print " -p print via lpr (default is stdout)"
print " -P <printer> same as -p but use given printer"
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error, v:
print v
sys.exit(1)
printer = None # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print Image.ID
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printer = "lpr"
elif o == "-P":
# printer channel
printer = "lpr -P%s" % a
for file in argv:
try:
im = Image.open(file)
title = description(file, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printer:
fp = os.popen(printer, "w")
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
except:
print "cannot print image",
print "(%s:%s)" % (sys.exc_type, sys.exc_value)
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
37395963f8379853974f5f8696e8b8931e11ce62 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/profile_create_tags_rsp_2.py | ad47f748c488af1111b4d2fa7afdfb17c4763642 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 766 | py | from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.base_rsp_5 import BaseRsp5
from travelport.models.tag_2 import Tag2
__NAMESPACE__ = "http://www.travelport.com/schema/uprofile_v37_0"
@dataclass
class ProfileCreateTagsRsp2(BaseRsp5):
"""
Response with all the tags for the agency.
Parameters
----------
tag
A tag that belongs to the agency.
"""
class Meta:
name = "ProfileCreateTagsRsp"
namespace = "http://www.travelport.com/schema/uprofile_v37_0"
tag: list[Tag2] = field(
default_factory=list,
metadata={
"name": "Tag",
"type": "Element",
"min_occurs": 1,
"max_occurs": 15,
}
)
| [
"chris@komposta.net"
] | chris@komposta.net |
634891dd887f82412cd1b7ec26823a973afb2113 | d4a30b61cf9b4602fe09a056f80f210d7426830f | /clinvoc/loinc.py | aea903aa670a8dd14fd2cf7761d1e074243e1184 | [
"MIT"
] | permissive | dustinrbunch/clinvoc | c3bbe444ad1e39f63a1ed9c8268cd8ee64e02113 | 98b0be94f4c3c5a9ea58a343c5ce5e27b688d1a0 | refs/heads/master | 2021-08-22T18:24:12.819112 | 2017-11-30T23:09:24 | 2017-11-30T23:09:24 | 116,181,004 | 0 | 1 | null | 2018-01-03T20:54:04 | 2018-01-03T20:54:03 | null | UTF-8 | Python | false | false | 1,043 | py | import csv
from .base import RegexVocabulary, LexicographicPatternMatchVocabulary, LexicographicRangeFillVocabulary, \
LexicographicVocabulary, left_pad, ObservationVocabulary
import os
from .resources import resources
from six import next
import io
def _read_text_file(filename):
codes = []
with io.open(filename, mode='rt', encoding='utf-8') as infile:
reader = csv.reader(infile, delimiter=',', quoting=csv.QUOTE_ALL)
next(reader)
for line in reader:
codes.append(line[0])
return codes
_all_loinc_codes = _read_text_file(os.path.join(resources, 'LOINC_2.59_Text', 'loinc.csv'))
class LOINC(RegexVocabulary, LexicographicPatternMatchVocabulary, LexicographicRangeFillVocabulary, ObservationVocabulary):
vocab_name = 'LOINC'
def __init__(self):
RegexVocabulary.__init__(self, '[\d\*]{1,5}\-[\d\*]')
LexicographicVocabulary.__init__(self, map(self.standardize, _all_loinc_codes))
def _standardize(self, code):
return left_pad(code, 7)
| [
"jcrudy@gmail.com"
] | jcrudy@gmail.com |
f7b39286d2b091f1ad630633a4d2b7ec3098387d | 94e7c790d17ba08e8a2a74077dd8b75e7ac120b0 | /chapter02/Exercise26_02.py | 9d8e9d3a031246e8e7c67e7f58065308f71f28bc | [] | no_license | lutfar9427/Exercises_Solution_of_INTRODUCTION_TO_PROGRAMMING_USING_Python | 9632e515428685dcaa7d057cf52f0e191e9f7ae0 | d037475316e6c6b7c6a7a7023318ef4ab4ed3f8d | refs/heads/master | 2020-09-02T09:04:44.990668 | 2018-10-20T00:50:12 | 2018-10-20T00:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | ''' **2.26 (Turtle: draw a circle) Write a program that prompts the user to enter the
center and radius of a circle, and then displays the circle and its area.
/**
* @author BASSAM FARAMAWI
* @email tiodaronzi3@yahoo.com
* @since 2018
*/
'''
import turtle # Import turtle module
import math # Import cmath module
# Prompt the user to enter center point
centerX, centerY = eval(input("Enter the circle center in X and Y: "))
# Prompt the user to enter the circle radius
radius = eval(input("Enter the circle radius: "))
turtle.showturtle() # Show the turtle graphics window
# Draw circle
turtle.penup()
turtle.goto(centerX, centerY - radius)
turtle.pendown()
turtle.circle(radius)
turtle.penup()
# Write area inside circle
turtle.goto(centerX, centerY)
turtle.pendown()
area = math.pi * radius ** 2
turtle.write(int(area * 100) / 100)
turtle.done() # Don't close the turtle graphics window
| [
"tiodaronzi3@yahoo.com"
] | tiodaronzi3@yahoo.com |
b78e01d3e839e6f6c9a74c11e5ff4bc3a6cd0f88 | 70e970ce9ec131449b0888388f65f0bb55f098cd | /SignalMC/python/pythia8/AMSB_gluinoToChargino_M-1600GeV_M-700GeV_CTau-10cm_TuneCP5_13TeV_pythia8_cff.py | 035618a10cec7e4d889596252199520fcdd5d624 | [] | no_license | OSU-CMS/DisappTrks | 53b790cc05cc8fe3a9f7fbd097284c5663e1421d | 1d1c076863a9f8dbd3f0c077d5821a8333fc5196 | refs/heads/master | 2023-09-03T15:10:16.269126 | 2023-05-25T18:37:40 | 2023-05-25T18:37:40 | 13,272,469 | 5 | 12 | null | 2023-09-13T12:15:49 | 2013-10-02T13:58:51 | Python | UTF-8 | Python | false | false | 8,097 | py | COM_ENERGY = 13000.
MGLU = 1600 # GeV
MCHI = 700 # GeV
CTAU = 100 # mm
CROSS_SECTION = 0.00887 # pb
SLHA_TABLE="""
# ISAJET SUSY parameters in SUSY Les Houches Accord 2 format
# Created by ISALHA 2.0 Last revision: C. Balazs 21 Apr 2009
Block SPINFO # Program information
1 ISASUGRA from ISAJET # Spectrum Calculator
2 7.80 29-OCT-2009 12:50:36 # Version number
Block MODSEL # Model selection
1 3 # Minimal anomaly mediated (AMSB) model
Block SMINPUTS # Standard Model inputs
1 1.27842453E+02 # alpha_em^(-1)
2 1.16570000E-05 # G_Fermi
3 1.17200002E-01 # alpha_s(M_Z)
4 9.11699982E+01 # m_{Z}(pole)
5 4.19999981E+00 # m_{b}(m_{b})
6 1.73070007E+02 # m_{top}(pole)
7 1.77699995E+00 # m_{tau}(pole)
Block MINPAR # SUSY breaking input parameters
1 1.50000000E+03 # m_0
2 2.46440000E+05 # m_{3/2}
3 5.00000000E+00 # tan(beta)
4 1.00000000E+00 # sign(mu)
Block EXTPAR # Non-universal SUSY breaking parameters
0 1.04228903E+16 # Input scale
Block MASS # Scalar and gaugino mass spectrum
# PDG code mass particle
24 8.04229965E+01 # W^+
25 1.16918777E+02 # h^0
35 4.13995459E+03 # H^0
36 4.11271240E+03 # A^0
37 4.12772119E+03 # H^+
1000001 4.68634814E+03 # dnl
1000002 4.68567432E+03 # upl
1000003 4.68634814E+03 # stl
1000004 4.68567480E+03 # chl
1000005 4.09400562E+03 # b1
1000006 3.40991528E+03 # t1
1000011 1.14678894E+03 # el-
1000012 1.12562231E+03 # nuel
1000013 1.14678894E+03 # mul-
1000014 1.12562231E+03 # numl
1000015 1.02227649E+03 # tau1
1000016 1.11225781E+03 # nutl
1000021 %.9g # glss
1000022 6.99874146E+02 # z1ss
1000023 2.26904956E+03 # z2ss
1000024 7.00047607E+02 # w1ss
1000025 -3.87153369E+03 # z3ss
1000035 3.87282349E+03 # z4ss
1000037 3.87772314E+03 # w2ss
2000001 4.76078076E+03 # dnr
2000002 4.71648975E+03 # upr
2000003 4.76078076E+03 # str
2000004 4.71649023E+03 # chr
2000005 4.72474414E+03 # b2
2000006 4.13260303E+03 # t2
2000011 1.02800623E+03 # er-
2000013 1.02800623E+03 # mur-
2000015 1.12574829E+03 # tau2
Block ALPHA # Effective Higgs mixing parameter
-1.97664991E-01 # alpha
Block STOPMIX # stop mixing matrix
1 1 8.36024433E-02 # O_{11}
1 2 -9.96499181E-01 # O_{12}
2 1 9.96499181E-01 # O_{21}
2 2 8.36024433E-02 # O_{22}
Block SBOTMIX # sbottom mixing matrix
1 1 9.99983907E-01 # O_{11}
1 2 5.66892792E-03 # O_{12}
2 1 -5.66892792E-03 # O_{21}
2 2 9.99983907E-01 # O_{22}
Block STAUMIX # stau mixing matrix
1 1 1.32659495E-01 # O_{11}
1 2 9.91161644E-01 # O_{12}
2 1 -9.91161644E-01 # O_{21}
2 2 1.32659495E-01 # O_{22}
Block NMIX # neutralino mixing matrix
1 1 -8.25339637E-04 #
1 2 9.99776781E-01 #
1 3 -2.02405099E-02 #
1 4 6.01018919E-03 #
2 1 9.99794424E-01 #
2 2 1.23403966E-03 #
2 3 1.68632567E-02 #
2 4 -1.11932158E-02 #
3 1 -4.01982665E-03 #
3 2 1.00584431E-02 #
3 3 7.06979156E-01 #
3 4 7.07151294E-01 #
4 1 1.98580157E-02 #
4 2 -1.85414888E-02 #
4 3 -7.06743419E-01 #
4 4 7.06947982E-01 #
Block UMIX # chargino U mixing matrix
1 1 -9.99564528E-01 # U_{11}
1 2 2.95085218E-02 # U_{12}
2 1 -2.95085218E-02 # U_{21}
2 2 -9.99564528E-01 # U_{22}
Block VMIX # chargino V mixing matrix
1 1 -9.99936998E-01 # V_{11}
1 2 1.12252701E-02 # V_{12}
2 1 -1.12252701E-02 # V_{21}
2 2 -9.99936998E-01 # V_{22}
Block GAUGE Q= 3.58269727E+03 #
1 3.57497722E-01 # g`
2 6.52475953E-01 # g_2
3 1.22070026E+00 # g_3
Block YU Q= 3.58269727E+03 #
3 3 8.38887691E-01 # y_t
Block YD Q= 3.58269727E+03 #
3 3 6.52210116E-02 # y_b
Block YE Q= 3.58269727E+03 #
3 3 5.15824445E-02 # y_tau
Block HMIX Q= 3.58269727E+03 # Higgs mixing parameters
1 3.87514209E+03 # mu(Q)
2 5.00000000E+00 # tan(beta)(M_GUT)
3 2.51709106E+02 # Higgs vev at Q
4 1.69144040E+07 # m_A^2(Q)
Block MSOFT Q= 3.58269727E+03 # DRbar SUSY breaking parameters
1 2.30335156E+03 # M_1(Q)
2 6.64254944E+02 # M_2(Q)
3 -4.50376855E+03 # M_3(Q)
31 1.12926123E+03 # MeL(Q)
32 1.12926123E+03 # MmuL(Q)
33 1.11625525E+03 # MtauL(Q)
34 1.03541077E+03 # MeR(Q)
35 1.03541077E+03 # MmuR(Q)
36 9.99967957E+02 # MtauR(Q)
41 4.45722266E+03 # MqL1(Q)
42 4.45722266E+03 # MqL2(Q)
43 3.91252832E+03 # MqL3(Q)
44 4.48730469E+03 # MuR(Q)
45 4.48730469E+03 # McR(Q)
46 3.28067163E+03 # MtR(Q)
47 4.53066406E+03 # MdR(Q)
48 4.53066406E+03 # MsR(Q)
49 4.55108252E+03 # MbR(Q)
Block AU Q= 3.58269727E+03 #
1 1 3.86256177E+03 # A_u
2 2 3.86256177E+03 # A_c
3 3 3.86256177E+03 # A_t
Block AD Q= 3.58269727E+03 #
1 1 9.22079785E+03 # A_d
2 2 9.22079785E+03 # A_s
3 3 9.22079785E+03 # A_b
Block AE Q= 3.58269727E+03 #
1 1 2.57661255E+03 # A_e
2 2 2.57661255E+03 # A_mu
3 3 2.57661255E+03 # A_tau
#
#
#
# =================
# |The decay table|
# =================
#
# PDG Width
DECAY 1000021 5.50675438E+00 # gluino decay
# BR NDA ID1 ID2 ID3
2.50000000E-01 3 1 -1 1000022
2.50000000E-01 3 2 -2 1000022
2.50000000E-01 3 1 -2 1000024
2.50000000E-01 3 -1 2 -1000024
#
# PDG Width
DECAY 1000024 %.9g # chargino decay
#
""" % (MGLU, (1.97326979e-13 / CTAU))
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
# Pythia8 generator for gluino pair production with long-lived charginos; the
# SLHA spectrum/decay table assembled above is handed to Pythia in-memory.
generator = cms.EDFilter(
    "Pythia8GeneratorFilter",
    pythiaPylistVerbosity = cms.untracked.int32(0),
    filterEfficiency = cms.untracked.double(-1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
    comEnergy = cms.double(COM_ENERGY),
    crossSection = cms.untracked.double(CROSS_SECTION),
    maxEventsToPrint = cms.untracked.int32(0),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP5SettingsBlock,
        processParameters = cms.vstring(
            'SUSY:all = off',
            'SUSY:gg2gluinogluino = on',
            'SUSY:qqbar2gluinogluino = on',
            '1000024:isResonance = false',
            '1000024:oneChannel = 1 1.0 100 1000022 211',
            '1000024:tau0 = %.1f' % CTAU,
            'ParticleDecays:tau0Max = %.1f' % (CTAU * 10),
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CP5Settings',
            'processParameters'),
    ),
    # The following parameters are required by Exotica_HSCP_SIM_cfi:
    slhaFile = cms.untracked.string(''),  # value not used
    processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
    useregge = cms.bool(False),
    hscpFlavor = cms.untracked.string('stau'),
    massPoint = cms.untracked.int32(MCHI),  # value not used
    particleFile = cms.untracked.string(
        'Configuration/GenProduction/python/ThirteenTeV/DisappTrksAMSBCascade/test/geant4_AMSB_chargino_%sGeV_ctau%scm.slha' % (MCHI, CTAU/10)),
)

ProductionFilterSequence = cms.Sequence(generator)
| [
"ahart@cern.ch"
] | ahart@cern.ch |
60a7145e797fe9b9da04053539801eccab969c45 | 5623771414b26c021be54facaaaefbd9314b389d | /pynativesite/ex7-stringcounter.py | 975d996c8e378511a3c7587ff2534e80a53bbd48 | [] | no_license | saxenasamarth/BootCamp_PythonLearning | 36b705b83c7f0e297931bb8d75cb541088690248 | d5b8fe2d6fcfe54c5a7393f218414b1122f3e49e | refs/heads/master | 2023-04-17T15:29:05.402863 | 2019-08-29T08:46:34 | 2019-08-29T08:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | a="Emma is a good developer. Emma is also a writer"
# Count how often each whitespace-separated word occurs in the sentence ``a``.
l = a.split()
myDict = {}
for i in l:
    # dict.get supplies 0 the first time a word is seen.
    myDict[i] = myDict.get(i, 0) + 1
print(myDict)
| [
"saxenasamarth@gmail.com"
] | saxenasamarth@gmail.com |
d6e9267928a3c0f506fc52dcf65dd8766979f701 | 63e6dbbca3fd45438300080e6def65896fe0f7ea | /tests/test_runner.py | 4ef52015c20d486a8ac3abf7e373495717d62d34 | [
"MIT"
] | permissive | rubyvirus/ApiTestEngine | ab7084b26ec8d046cb592df87d8a74cfa3cbe830 | be73317f593ecc2d42425f8e51109d45d3752d46 | refs/heads/master | 2021-01-01T19:33:27.153275 | 2017-07-27T13:12:10 | 2017-07-27T13:12:10 | 98,614,939 | 2 | 0 | null | 2017-07-28T06:11:58 | 2017-07-28T06:11:58 | null | UTF-8 | Python | false | false | 4,756 | py | import os
import requests
from ate import runner, exception, utils
from tests.base import ApiServerUnittest
class TestRunner(ApiServerUnittest):
    """Drives runner.Runner through the demo testset fixtures against the
    local API server started by ApiServerUnittest."""

    def setUp(self):
        self.test_runner = runner.Runner()
        self.reset_all()
        cwd = os.getcwd()
        self.testcase_file_path_list = [
            os.path.join(cwd, 'tests/data/demo_testset_hardcode.yml'),
            os.path.join(cwd, 'tests/data/demo_testset_hardcode.json')
        ]

    def reset_all(self):
        # Wipe all server-side state so every test starts from a clean slate.
        url = "%s/api/reset-all" % self.host
        headers = self.get_authenticated_headers()
        return self.api_client.get(url, headers=headers)

    def test_run_single_testcase(self):
        for testcase_file_path in self.testcase_file_path_list:
            testcases = utils.load_testcases(testcase_file_path)
            # Each of the first three hardcoded cases must pass on its own.
            for index in (0, 1, 2):
                success, _ = self.test_runner.run_test(testcases[index]["test"])
                self.assertTrue(success)

    def test_run_single_testcase_fail(self):
        testcase = {
            "name": "get token",
            "request": {
                "url": "http://127.0.0.1:5000/api/get-token",
                "method": "POST",
                "headers": {
                    "content-type": "application/json",
                    "user_agent": "iOS/10.3",
                    "device_sn": "HZfFBh6tU59EdXJ",
                    "os_platform": "ios",
                    "app_version": "2.8.6"
                },
                "json": {
                    "sign": "f1219719911caae89ccc301679857ebfda115ca2"
                }
            },
            "extract_binds": [
                {"token": "content.token"}
            ],
            "validators": [
                {"check": "status_code", "comparator": "eq", "expected": 205},
                {"check": "content.token", "comparator": "len_eq", "expected": 19}
            ]
        }

        success, diff_content_list = self.test_runner.run_test(testcase)

        # The server answers 200, so the status_code validator must fail.
        self.assertFalse(success)
        self.assertEqual(
            diff_content_list[0],
            {"check": "status_code", "comparator": "eq", "expected": 205, 'value': 200}
        )

    def test_run_testset_hardcode(self):
        for path in self.testcase_file_path_list:
            testset = utils.load_testcases_by_path(path)[0]
            results = self.test_runner.run_testset(testset)
            self.assertEqual(len(results), 3)
            self.assertEqual(results, [(True, [])] * 3)

    def test_run_testsets_hardcode(self):
        for path in self.testcase_file_path_list:
            testsets = utils.load_testcases_by_path(path)
            results = self.test_runner.run_testsets(testsets)
            self.assertEqual(len(results), 1)
            self.assertEqual(results, [[(True, [])] * 3])

    def test_run_testset_template_variables(self):
        path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_variables.yml')
        results = self.test_runner.run_testset(
            utils.load_testcases_by_path(path)[0])
        self.assertEqual(len(results), 3)
        self.assertEqual(results, [(True, [])] * 3)

    def test_run_testset_template_import_functions(self):
        path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_template_import_functions.yml')
        results = self.test_runner.run_testset(
            utils.load_testcases_by_path(path)[0])
        self.assertEqual(len(results), 3)
        self.assertEqual(results, [(True, [])] * 3)

    def test_run_testsets_template_import_functions(self):
        path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_template_import_functions.yml')
        results = self.test_runner.run_testsets(
            utils.load_testcases_by_path(path))
        self.assertEqual(len(results), 1)
        self.assertEqual(results, [[(True, [])] * 3])

    def test_run_testsets_template_lambda_functions(self):
        path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_template_lambda_functions.yml')
        results = self.test_runner.run_testsets(
            utils.load_testcases_by_path(path))
        self.assertEqual(len(results), 1)
        self.assertEqual(results, [[(True, [])] * 3])
| [
"mail@debugtalk.com"
] | mail@debugtalk.com |
680a6c84c39e1de7ff9a01a0299a6d53e240bf45 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/octoprint/test_button.py | 644c1e39437a6d69a666b58933b51f6fe644fd34 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 6,445 | py | """Test the OctoPrint buttons."""
from unittest.mock import patch
from pyoctoprintapi import OctoprintPrinterInfo
import pytest
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.components.octoprint import OctoprintDataUpdateCoordinator
from homeassistant.components.octoprint.button import InvalidPrinterState
from homeassistant.components.octoprint.const import DOMAIN
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from . import init_integration
async def test_pause_job(hass: HomeAssistant):
    """Test the pause job button."""
    await init_integration(hass, BUTTON_DOMAIN)

    coordinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN]["uuid"][
        "coordinator"
    ]

    def _set_printer(flags):
        # Swap in a printer snapshot with the given state flags.
        coordinator.data["printer"] = OctoprintPrinterInfo(
            {"state": {"flags": flags}, "temperature": []}
        )

    async def _press():
        await hass.services.async_call(
            BUTTON_DOMAIN,
            SERVICE_PRESS,
            {ATTR_ENTITY_ID: "button.octoprint_pause_job"},
            blocking=True,
        )

    # While printing, pressing the button issues exactly one pause command.
    with patch("pyoctoprintapi.OctoprintClient.pause_job") as pause_command:
        _set_printer({"printing": True})
        await _press()
        assert len(pause_command.mock_calls) == 1

    # Already paused: the press is a no-op.
    with patch("pyoctoprintapi.OctoprintClient.pause_job") as pause_command:
        _set_printer({"printing": False, "paused": True})
        await _press()
        assert len(pause_command.mock_calls) == 0

    # Neither printing nor paused: the entity raises instead of pausing.
    with patch(
        "pyoctoprintapi.OctoprintClient.pause_job"
    ) as pause_command, pytest.raises(InvalidPrinterState):
        _set_printer({"printing": False, "paused": False})
        await _press()
async def test_resume_job(hass: HomeAssistant):
    """Test the resume job button."""
    await init_integration(hass, BUTTON_DOMAIN)

    coordinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN]["uuid"][
        "coordinator"
    ]

    def _set_printer(flags):
        # Swap in a printer snapshot with the given state flags.
        coordinator.data["printer"] = OctoprintPrinterInfo(
            {"state": {"flags": flags}, "temperature": []}
        )

    async def _press():
        await hass.services.async_call(
            BUTTON_DOMAIN,
            SERVICE_PRESS,
            {ATTR_ENTITY_ID: "button.octoprint_resume_job"},
            blocking=True,
        )

    # Paused printer: pressing resume issues exactly one resume command.
    with patch("pyoctoprintapi.OctoprintClient.resume_job") as resume_command:
        _set_printer({"printing": False, "paused": True})
        await _press()
        assert len(resume_command.mock_calls) == 1

    # Already printing: the press is a no-op.
    with patch("pyoctoprintapi.OctoprintClient.resume_job") as resume_command:
        _set_printer({"printing": True, "paused": False})
        await _press()
        assert len(resume_command.mock_calls) == 0

    # Neither printing nor paused: the entity raises instead of resuming.
    with patch(
        "pyoctoprintapi.OctoprintClient.resume_job"
    ) as resume_command, pytest.raises(InvalidPrinterState):
        _set_printer({"printing": False, "paused": False})
        await _press()
async def test_stop_job(hass: HomeAssistant):
    """Test the stop job button."""
    await init_integration(hass, BUTTON_DOMAIN)

    coordinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN]["uuid"][
        "coordinator"
    ]

    def _set_printer(flags):
        # Swap in a printer snapshot with the given state flags.
        coordinator.data["printer"] = OctoprintPrinterInfo(
            {"state": {"flags": flags}, "temperature": []}
        )

    async def _press():
        await hass.services.async_call(
            BUTTON_DOMAIN,
            SERVICE_PRESS,
            {ATTR_ENTITY_ID: "button.octoprint_stop_job"},
            blocking=True,
        )

    # Paused job: pressing stop issues exactly one cancel command.
    with patch("pyoctoprintapi.OctoprintClient.cancel_job") as stop_command:
        _set_printer({"printing": False, "paused": True})
        await _press()
        assert len(stop_command.mock_calls) == 1

    # Actively printing: stop also cancels.
    with patch("pyoctoprintapi.OctoprintClient.cancel_job") as stop_command:
        _set_printer({"printing": True, "paused": False})
        await _press()
        assert len(stop_command.mock_calls) == 1

    # Nothing running: the press is a no-op.
    with patch("pyoctoprintapi.OctoprintClient.cancel_job") as stop_command:
        _set_printer({"printing": False, "paused": False})
        await _press()
        assert len(stop_command.mock_calls) == 0
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
5249e7fc960985d57a355934617c01ef033bc8b0 | bc5e3ec2665f795b84671317ce736719ab79dc0f | /unit_tests/source_information/test_delete_source_information.py | e3fc02bbbeac4e5baefc5f7d83a8dc72323e5342 | [
"MIT"
] | permissive | uk-gov-mirror/LandRegistry.maintain-frontend | 9de44a9f42c4c29682276420dcf297d0afb48e5f | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | refs/heads/master | 2021-09-26T16:14:55.686790 | 2018-10-29T15:37:03 | 2018-10-31T14:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,681 | py | from flask_testing import TestCase
from flask import url_for
from maintain_frontend import main
from unit_tests.utilities import Utilities
from unittest.mock import patch
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.constants.permissions import Permissions
class TestAddSourceInformation(TestCase):
    """Route-level tests for the delete-source-information flow."""

    def create_app(self):
        main.app.testing = True
        Utilities.mock_session_cookie_flask_test(self)
        return main.app

    def setUp(self):
        main.app.config['Testing'] = True
        main.app.config['WTF_CSRF_ENABLED'] = False
        self.client.set_cookie('localhost', Session.session_cookie_name,
                               'cookie_value')

    def _grant(self, permissions):
        # Shortcut for stubbing the session user's permission list.
        self.mock_session.return_value.user.permissions = permissions

    def test_get_delete_source_information(self):
        self._grant([Permissions.manage_source_information])
        response = self.client.get(url_for('source_info.get_delete_source_information'))
        self.assert200(response)

    def test_get_delete_source_information_no_permissions(self):
        self._grant([])
        response = self.client.get(url_for('source_info.get_delete_source_information'))
        self.assert_status(response, 302)
        self.assertRedirects(response, '/not-authorised')

    @patch('maintain_frontend.source_information.delete_source_information.LocalAuthorityService')
    @patch('maintain_frontend.source_information.delete_source_information.request')
    def test_post_delete_source_information(self, mock_request, mock_local_authority_service):
        session = self.mock_session.return_value
        session.user.permissions = [Permissions.manage_source_information]
        session.user.organisation = "Test Organisation"
        session.source_information_id = 1
        session.submit_token = "previous-token"
        mock_request.form = {"csrf_token": "new-token"}

        response = self.client.post(url_for('source_info.post_delete_source_information'))

        self.assert_status(response, 302)
        self.assertRedirects(response, url_for('source_info.get_delete_source_information_success'))
        self.assertTrue(session.commit.called)
        self.assertEqual(session.submit_token, "new-token")
        mock_local_authority_service.return_value.delete_source_information_for_organisation\
            .assert_called_with("Test Organisation", 1)

    def test_post_delete_source_information_no_permissions(self):
        self._grant([])
        response = self.client.post(url_for('source_info.post_delete_source_information'))
        self.assert_status(response, 302)
        self.assertRedirects(response, '/not-authorised')

    def test_get_delete_source_information_success(self):
        session = self.mock_session.return_value
        session.user.permissions = [Permissions.manage_source_information]
        session.source_information = "Source information"
        response = self.client.get(url_for('source_info.get_delete_source_information_success'))
        self.assert_status(response, 200)
        self.assertTrue(session.commit.called)
        self.assertIsNone(session.source_information)

    def test_get_delete_source_information_success_no_permission(self):
        self._grant([])
        response = self.client.get(url_for('source_info.get_delete_source_information_success'))
        self.assert_status(response, 302)
        self.assertRedirects(response, '/not-authorised')
| [
"james.lademann@landregistry.gov.uk"
] | james.lademann@landregistry.gov.uk |
392aa046fe0104e22e235838f4d8355d8a3939fa | 7f44a279773732b183963349d146a8dd9a195b88 | /home/migrations/0029_room_exhibition_page.py | a976be38f8ae51532c2347c6b5fa4bca07ade57c | [] | no_license | pseudobabble/cms-boilerplate | f138060e2f25721191289eb261185136ae9cf6bd | 3923a8ebe1541118c5551b0996557f241943831f | refs/heads/master | 2022-12-28T01:30:49.554898 | 2020-10-15T15:23:10 | 2020-10-15T15:23:10 | 283,308,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # Generated by Django 3.1.1 on 2020-09-30 21:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0052_pagelogentry'),
('home', '0028_auto_20200930_2134'),
]
operations = [
migrations.AddField(
model_name='room',
name='exhibition_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.page'),
),
]
| [
"harryjohnson500@gmail.com"
] | harryjohnson500@gmail.com |
d26eeb5552b0369f084f7a5128394a09e9dc4b0c | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/models_20201030113954.py | 8695d3c226c94c245723c3f81cade7c00b47a490 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from django.db import models
from django_extensions.db.fields
from modelcluster.models import ClusterableModel
# Create your models here.
class Menu(ClusterableModel):
title = models.CharField(max_length=100)
slug = AutoSlugField(
populate_from='title',
editable=True,
)
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
5c874cba631eba15e0314dd533f3b1f495c59b31 | ec1059f4ccea10deb2cb8fd7f9458700a5e6ca4c | /venv/Lib/site-packages/qiskit/circuit/library/standard_gates/iswap.py | b20ce83c988b144870152947a13771d29c91abc7 | [
"Apache-2.0",
"MIT"
] | permissive | shivam675/Quantum-CERN | b60c697a3a7ad836b3653ee9ce3875a6eafae3ba | ce02d9198d9f5a1aa828482fea9b213a725b56bb | refs/heads/main | 2023-01-06T20:07:15.994294 | 2020-11-13T10:01:38 | 2020-11-13T10:01:38 | 330,435,191 | 1 | 0 | MIT | 2021-01-17T16:29:26 | 2021-01-17T16:29:25 | null | UTF-8 | Python | false | false | 3,414 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""iSWAP gate."""
import numpy as np
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class iSwapGate(Gate):
    r"""iSWAP gate: a two-qubit XX+YY interaction.

    A Clifford and symmetric gate that swaps the two qubit states while
    multiplying the :math:`|01\rangle` and :math:`|10\rangle` amplitudes
    by :math:`i`:

    .. math::

        iSWAP = R_{XX+YY}\left(-\frac{\pi}{2}\right) =
        \begin{pmatrix}
            1 & 0 & 0 & 0 \\
            0 & 0 & i & 0 \\
            0 & i & 0 & 0 \\
            0 & 0 & 0 & 1
        \end{pmatrix}

    i.e. a SWAP up to a diagonal phase on the swapped amplitudes.
    """

    def __init__(self):
        """Create new iSwap gate."""
        super().__init__('iswap', 2, [])

    def _define(self):
        """Decompose into Cliffords:

            gate iswap a,b { s a; s b; h a; cx a,b; cx b,a; h b; }
        """
        # pylint: disable=cyclic-import
        from qiskit.circuit.quantumcircuit import QuantumCircuit
        from .h import HGate
        from .s import SGate
        from .x import CXGate

        qreg = QuantumRegister(2, 'q')
        circuit = QuantumCircuit(qreg, name=self.name)
        circuit._append(SGate(), [qreg[0]], [])
        circuit._append(SGate(), [qreg[1]], [])
        circuit._append(HGate(), [qreg[0]], [])
        circuit._append(CXGate(), [qreg[0], qreg[1]], [])
        circuit._append(CXGate(), [qreg[1], qreg[0]], [])
        circuit._append(HGate(), [qreg[1]], [])
        self.definition = circuit

    def to_matrix(self):
        """Return a numpy.array for the iSWAP gate."""
        matrix = np.zeros((4, 4), dtype=complex)
        matrix[0, 0] = matrix[3, 3] = 1
        matrix[1, 2] = matrix[2, 1] = 1j
        return matrix
| [
"vinfinitysailor@gmail.com"
] | vinfinitysailor@gmail.com |
11fe2fa01c814fae5d0430170f17bba2e1579500 | 9d07335de5a17453bf8ae290d70993d7b20dddcd | /.history/rw_visual_20210223182409.py | 873abd9382e1b2417301033de09af55b5ee79f83 | [] | no_license | wh-debug/Matplotlib | 8d12291cd4135b3b42c185e6700f22c627ddb046 | b4f5bf63d977620f799d953c67b262c75344a1cb | refs/heads/master | 2023-03-14T10:09:33.602492 | 2021-02-23T13:51:21 | 2021-02-23T13:51:21 | 340,374,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | '''
Author: your name
Date: 2021-02-23 18:21:32
LastEditTime: 2021-02-23 18:24:09
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \Matplotlib\rw_visual.py
'''
import matplotlib.pyplot as plt
from randowwalk import Randomwalk
rw = Randomwalk()
# BUG FIX: the walk was invoked on the undefined name ``re`` (NameError at
# runtime); fill_walk() belongs to the Randomwalk instance created above.
rw.fill_walk()

plt.style.use('classic')
fig, ax = plt.subplots()
# Plot every visited point of the random walk.
ax.scatter(rw.x_values, rw.y_values, s=15)
plt.show()
"1813763848@qq.com"
] | 1813763848@qq.com |
7e50e95d03e70a3bfb183699c250db14e12f123e | e9539de5b8832e2a09365917fe201a945bf5d99b | /leetcode415.py | e00d00059a0b342b775f908c101966fc440b5988 | [] | no_license | JoshuaW1990/leetcode-session1 | 56d57df30b21ccade3fe54e3fd56a2b3383bd793 | 6fc170c04fadec6966fb7938a07474d4ee107b61 | refs/heads/master | 2021-09-20T16:18:15.640839 | 2018-08-12T09:40:51 | 2018-08-12T09:40:51 | 76,912,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | class Solution(object):
def addStrings(self, num1, num2):
    """
    Add two non-negative integers given as decimal strings, digit by digit.

    Fixed for Python 3 compatibility: the original used ``xrange`` (removed
    in Python 3) and true division ``/`` for the carry (float under Python
    3). ``range`` and ``//`` behave identically under Python 2 as well.

    :type num1: str
    :type num2: str
    :rtype: str
    """
    digits1 = list(reversed(num1))
    digits2 = list(reversed(num2))
    length = max(len(digits1), len(digits2))
    # One extra slot for a possible final carry.
    res = [0] * (length + 1)
    for i in range(length):
        d1 = int(digits1[i]) if i < len(digits1) else 0
        d2 = int(digits2[i]) if i < len(digits2) else 0
        res[i] += d1 + d2
        res[i + 1] += res[i] // 10  # propagate carry (floor division)
        res[i] %= 10
    # Strip leading zeros, but always keep at least one digit.
    while len(res) > 1 and res[-1] == 0:
        res.pop()
    return ''.join(map(str, reversed(res)))
"Jun.Wang@tufts.edu"
] | Jun.Wang@tufts.edu |
dc0f6e6e1f31a83777b94b3ca2f56521a5f9a717 | 3ccd609f68016aad24829b8dd3cdbb535fb0ff6d | /python/bpy/types/ArmatureGpencilModifier.py | 2455daa657818cda30d0712d8e5bd83aeba06697 | [] | no_license | katharostech/blender_externs | 79b2eed064fd927e3555aced3e2eb8a45840508e | fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d | refs/heads/master | 2020-04-11T14:00:29.393478 | 2018-10-01T00:40:51 | 2018-10-01T00:40:51 | 161,838,212 | 1 | 1 | null | 2018-12-14T20:41:32 | 2018-12-14T20:41:32 | null | UTF-8 | Python | false | false | 204 | py | class ArmatureGpencilModifier:
# Auto-generated attribute stubs for bpy.types.ArmatureGpencilModifier; all
# values are None placeholders — the real data lives in Blender's RNA at
# runtime (this file only exists for IDE completion).
invert_vertex_group = None
object = None  # NOTE(review): presumably the armature Object — confirm against the Blender API docs
use_bone_envelopes = None
use_deform_preserve_volume = None
use_vertex_groups = None
vertex_group = None
| [
"troyedwardsjr@gmail.com"
] | troyedwardsjr@gmail.com |
d46e09613a217555e6e5ed34418ab5ecf17dcf85 | fbe77e9e2a53a4600a1d9b00b5f2c29ee3e8c59a | /externals/binaryen/test/waterfall/src/proc.py | 84bafe0942961b131f95feb6ae137242f3dae47a | [
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] | permissive | AcuteAngleCloud/Acute-Angle-Chain | 8d4a1ad714f6de1493954326e109b6af112561b9 | 5ea50bee042212ccff797ece5018c64f3f50ceff | refs/heads/master | 2021-04-26T21:52:25.560457 | 2020-03-21T07:29:06 | 2020-03-21T07:29:06 | 124,164,376 | 10 | 5 | MIT | 2020-07-16T07:14:45 | 2018-03-07T02:03:53 | C++ | UTF-8 | Python | false | false | 1,838 | py | #! /usr/bin/env python
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is intended to be a drop-in replacement for the standard
# subprocess module, with the difference that it logs commands before it runs
# them. Everything not overriden should pass through to the subprocess module
# via the import trick below.
# Imports subprocess in its own namespace so we can always refer directly to
# its attributes.
import subprocess
import os
import sys
# Imports all of subprocess into the current namespace, effectively
# re-exporting everything.
from subprocess import * # flake8: noqa
# Now we can override any parts of subprocess we want, while leaving the rest.
def check_call(cmd, **kwargs):
  """Log *cmd* and its working directory, then run subprocess.check_call.

  Fixed: the Python-2-only ``print 'x' % y`` statement is now the
  parenthesised call form, which prints identically on Python 2 and 3.
  """
  cwd = kwargs.get('cwd', os.getcwd())
  c = ' '.join('"' + c + '"' if ' ' in c else c for c in cmd)
  print('subprocess.check_call(`%s`, cwd=`%s`)' % (c, cwd))
  sys.stdout.flush()
  subprocess.check_call(cmd, **kwargs)
  sys.stdout.flush()
def check_output(cmd, **kwargs):
  """Log *cmd* and its working directory, then return subprocess.check_output.

  Fixed: the Python-2-only ``print`` statement is now the parenthesised
  call form, which prints identically on Python 2 and 3.
  """
  cwd = kwargs.get('cwd', os.getcwd())
  c = ' '.join('"' + c + '"' if ' ' in c else c for c in cmd)
  print('subprocess.check_output(`%s`, cwd=`%s`)' % (c, cwd))
  sys.stdout.flush()
  try:
    return subprocess.check_output(cmd, **kwargs)
  finally:
    # Flush even if the child process fails.
    sys.stdout.flush()
| [
"caokun@acuteangle.cn"
] | caokun@acuteangle.cn |
064838b3c14fb7efa1e5da2a38c2ffa93073fad8 | 44846980df148e1a0621e8a359a7fd357482fd74 | /01-Defining_classes/06-Flower.py | d74019506df0fb22837b693e4de386c3b8f96df6 | [
"MIT"
] | permissive | Beshkov/Python_OOP | 2dbd3383126f226895b652c4feaf8d79d867d4f8 | 297edadb3e7801dfeee5752a20aae6aead8da610 | refs/heads/main | 2023-04-03T14:24:59.218856 | 2021-04-18T15:13:11 | 2021-04-18T15:13:11 | 341,330,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | class Flower:
def __init__(self, name, water_requirements):
    """Store the flower's name and the amount of water it needs to be happy."""
    self.name = name
    self.water_requirements = water_requirements
    # Starts dry and unhappy.
    self.quantity = 0
    self.is_happy = False
def water(self, quantity):
    """Record a watering; becomes happy once the requirement is met."""
    self.quantity = quantity
    # Once happy, the flower stays happy.
    self.is_happy = quantity >= self.water_requirements or self.is_happy
def status(self):
    """Return a one-line happiness report for this flower."""
    if not self.is_happy:
        return f'{self.name} is not happy'
    return f'{self.name} is happy'
# Demo: the lily needs 100 units — 50 is not enough, a full 100 is.
flower = Flower("Lilly", 100)
for amount in (50, 100):
    flower.water(amount)
    print(flower.status())
| [
"alexander.beshkov@gmail.com"
] | alexander.beshkov@gmail.com |
3fe35b303c4495a32d225afeb818b22ab559ed00 | 709f9dd4c975718df248a22431b99321b8840988 | /solutions/data_structures/avg_rating_nested.py | cac491fdf3fed5d6eaa39e3f6985d7c65d1e3089 | [] | no_license | Lionking2018/intropython | f3dccea8fba4713ac91bafcc99d7a3969e0423de | 4382189d78bcb0f1403300d473244712eb49858e | refs/heads/master | 2020-05-04T01:10:33.074114 | 2019-04-01T03:30:59 | 2019-04-01T03:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | netflix_favs = {
'Narcos': {
'premiere_year': 2015,
'total_seasons': 3,
'rating': 5,
'age_limit': '16+'
},
'The Good Place': {
'premiere_year': 2016,
'total_seasons': 3,
'rating': 4,
'age_limit': '13+'
},
'Sense8': {
'premiere_year': 2015,
'total_seasons': 2,
'rating': 3,
'age_limit': '16+'
},
'La niebla': {
'premiere_year': 2017,
'total_seasons': 1,
'rating': 5,
'age_limit': '16+'
},
}
premiere_years = []
for features in netflix_favs.values():
premiere_year = features['premiere_year']
premiere_years.append(premiere_year)
avg_premiere_years = sum(premiere_years) // len(premiere_years)
print(avg_premiere_years)
| [
"sdelquin@gmail.com"
] | sdelquin@gmail.com |
a6377abe573cb58c29683b449754f581e213f387 | 07d40ece1379dd95b6259b23f9358cafcd1daa36 | /business/my_page_business.py | 0eba7d4e43232d7ad421f9bb4555fbf044f451ba | [] | no_license | z1069867141/zzmx | b4c5881bae275b694a20649014439377cf916e46 | ab0266c935f6f21a158998dc84b5c02443f6d628 | refs/heads/master | 2022-08-24T06:49:24.834403 | 2020-05-24T17:06:40 | 2020-05-24T17:06:40 | 264,108,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | import os
import sys
sys.path.append(os.getcwd())
from handle.my_page_handle import my_p
from selenium import webdriver
import time
import pymysql
from mysql.mysql_function import mysql_function
class my_page(object):
    """Page-object wrapper for the "my page" screen.

    Each ``click_*`` helper triggers the corresponding UI control through
    the low-level handle and then reports whether the page still shows the
    logged-out login entry (see check_shop_title).
    """

    def __init__(self, driver):
        # Low-level element handle for the "my page" screen.
        self.mp_b = my_p(driver)

    def click_set(self):
        self.mp_b.click_set_button()
        return self.check_shop_title()

    def click_talk(self):
        self.mp_b.click_talk_button()
        return self.check_shop_title()

    def click_login(self):
        self.mp_b.click_login_button()
        return self.check_shop_title()

    def click_all_order(self):
        self.mp_b.click_all_order_button()
        return self.check_shop_title()

    def click_wait_pay(self):
        self.mp_b.click_wait_pay_button()
        return self.check_shop_title()

    def click_good_to_be_received(self):
        self.mp_b.click_good_to_be_received_button()
        return self.check_shop_title()

    def click_to_be_delivered(self):
        self.mp_b.click_to_be_delivered_button()
        return self.check_shop_title()

    def click_received(self):
        self.mp_b.click_received_button()
        return self.check_shop_title()

    def click_my_wallet(self):
        self.mp_b.click_my_wallet_button()
        return self.check_shop_title()

    def click_my_favourite(self):
        self.mp_b.click_my_favourite_button()
        return self.check_shop_title()

    def click_my_customer_service(self):
        self.mp_b.click_my_customer_service_button()
        return self.check_shop_title()

    def check_shop_title(self):
        """Return True when the login button reads "登录" (user logged out).

        Any lookup failure is treated as False.  FIX: the original used a
        bare ``except:``, which also swallowed KeyboardInterrupt/SystemExit;
        narrowed to ``Exception``.
        """
        try:
            return self.mp_b.get_login_button_text() == "登录"
        except Exception:
            return False
"919824370@qq.com"
] | 919824370@qq.com |
4ae106e866373b5dd410478098c0d0aed0281297 | a7f855efff14e0b15cffb3f035d8dc9f7f102afe | /mfb/extraMac/UTpackages/UTvolrend/UTVolumeLibrary.py | 4c446437e786ecadae139e63ffac5efdd7d37e4e | [] | no_license | BlenderCN-Org/FlipbookApp | 76fcd92644c4e18dd90885eeb49e5aecae28f6f0 | 0df2acebf76b40105812d2e3af8f0ef4784ab74c | refs/heads/master | 2020-05-27T14:33:25.330291 | 2014-07-10T17:47:29 | 2014-07-10T17:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,606 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _UTVolumeLibrary
import new
# Re-export new.instancemethod under the SWIG-conventional alias.
new_instancemethod = new.instancemethod
try:
    _swig_property = property
except NameError:
    # Python < 2.2 doesn't have 'property'.
    pass
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG setattr helper: route writes through the generated set-methods
    table, honouring the special 'thisown'/'this' attributes."""
    if name == "thisown":
        return self.this.own(value)
    if name == "this" and type(value).__name__ == 'PySwigObject':
        self.__dict__[name] = value
        return
    setter = class_type.__swig_setmethods__.get(name, None)
    if setter:
        return setter(self, value)
    if not static or hasattr(self, name):
        # Dynamic classes (or already-present attributes) accept the write.
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Dynamic (non-static) flavour of the nondynamic setattr helper.
    return _swig_setattr_nondynamic(self, class_type, name, value, static=0)
def _swig_getattr(self, class_type, name):
    """SWIG getattr helper backed by the generated get-methods table.

    FIX: the original ended with the Python-2-only statement form
    ``raise AttributeError,name`` — a SyntaxError on Python 3.  The call
    form below behaves identically on both major versions.
    """
    if name == "thisown":
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    """Repr for SWIG proxies: show the wrapped C object when available."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        # No underlying C object yet; fall back to an empty marker.
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class VolumeRenderer(_object):
    """SWIG-generated proxy for the native UTVolumeLibrary volume renderer.

    Every method forwards directly to the C extension module
    `_UTVolumeLibrary`; see the original C++ headers for parameter
    semantics. This file is generated — do not hand-edit logic.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, VolumeRenderer, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, VolumeRenderer, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Wrap the C++ object handle created by the extension module.
        this = _UTVolumeLibrary.new_VolumeRenderer(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _UTVolumeLibrary.delete_VolumeRenderer
    __del__ = lambda self : None;
    def initRenderer(*args): return _UTVolumeLibrary.VolumeRenderer_initRenderer(*args)
    def setAspectRatio(*args): return _UTVolumeLibrary.VolumeRenderer_setAspectRatio(*args)
    def setTextureSubCube(*args): return _UTVolumeLibrary.VolumeRenderer_setTextureSubCube(*args)
    def setQuality(*args): return _UTVolumeLibrary.VolumeRenderer_setQuality(*args)
    def getQuality(*args): return _UTVolumeLibrary.VolumeRenderer_getQuality(*args)
    def setNearPlane(*args): return _UTVolumeLibrary.VolumeRenderer_setNearPlane(*args)
    def getNearPlane(*args): return _UTVolumeLibrary.VolumeRenderer_getNearPlane(*args)
    def isShadedRenderingAvailable(*args): return _UTVolumeLibrary.VolumeRenderer_isShadedRenderingAvailable(*args)
    def enableShadedRendering(*args): return _UTVolumeLibrary.VolumeRenderer_enableShadedRendering(*args)
    def disableShadedRendering(*args): return _UTVolumeLibrary.VolumeRenderer_disableShadedRendering(*args)
    def uploadColorMappedData(*args): return _UTVolumeLibrary.VolumeRenderer_uploadColorMappedData(*args)
    def uploadColorMappedDataWithBorder(*args): return _UTVolumeLibrary.VolumeRenderer_uploadColorMappedDataWithBorder(*args)
    def testColorMappedData(*args): return _UTVolumeLibrary.VolumeRenderer_testColorMappedData(*args)
    def testColorMappedDataWithBorder(*args): return _UTVolumeLibrary.VolumeRenderer_testColorMappedDataWithBorder(*args)
    def uploadRGBAData(*args): return _UTVolumeLibrary.VolumeRenderer_uploadRGBAData(*args)
    def uploadGradients(*args): return _UTVolumeLibrary.VolumeRenderer_uploadGradients(*args)
    def calculateGradientsFromDensities(*args): return _UTVolumeLibrary.VolumeRenderer_calculateGradientsFromDensities(*args)
    def uploadColorMap(*args): return _UTVolumeLibrary.VolumeRenderer_uploadColorMap(*args)
    def getNumberOfPlanesRendered(*args): return _UTVolumeLibrary.VolumeRenderer_getNumberOfPlanesRendered(*args)
    def renderVolume(*args): return _UTVolumeLibrary.VolumeRenderer_renderVolume(*args)
    def uploadZeroPaddedData(*args): return _UTVolumeLibrary.VolumeRenderer_uploadZeroPaddedData(*args)
# Register the proxy class with the extension and re-export module-level
# helper functions from the C module.
VolumeRenderer_swigregister = _UTVolumeLibrary.VolumeRenderer_swigregister
VolumeRenderer_swigregister(VolumeRenderer)
InitTexParameteri = _UTVolumeLibrary.InitTexParameteri
QueryExtension = _UTVolumeLibrary.QueryExtension
createNumArr = _UTVolumeLibrary.createNumArr
| [
"mike.c.pan@gmail.com"
] | mike.c.pan@gmail.com |
a27dd330b64895b45778896a3420a5b3299c3d2a | 7c8bd2e26fdabf1555e0150272ecf035f6c21bbd | /ps프로젝트/Tr/이진 검색 트리.py | fa0067dd613bfa12638a810af539b50dd098ff45 | [] | no_license | hyeokjinson/algorithm | 44090c2895763a0c53d48ff4084a96bdfc77f953 | 46c04e0f583d4c6ec4f51a24f19a373b173b3d5c | refs/heads/master | 2021-07-21T10:18:43.918149 | 2021-03-27T12:27:56 | 2021-03-27T12:27:56 | 245,392,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import sys
sys.setrecursionlimit(10**6)
def postorder(start, end):
    """Print, in postorder, the BST whose preorder sequence is
    the module-level list post[start:end+1]."""
    if start > end:
        return
    # Locate the first key larger than the subtree root: everything before
    # it is the left subtree, everything from it onward the right subtree.
    split = end + 1
    for idx in range(start + 1, end + 1):
        if post[idx] > post[start]:
            split = idx
            break
    postorder(start + 1, split - 1)
    postorder(split, end)
    print(post[start])
if __name__ == '__main__':
    # Read up to 10001 integers (the BST's preorder sequence), one per
    # line, stopping at EOF or at the first non-integer line.
    post=[]
    count=0
    while count<=10000:
        try:
            num=int(input())
        except:
            # EOF (EOFError) or a malformed line (ValueError) ends input.
            break
        post.append(num)
        count+=1
    # Emit the postorder traversal of the reconstructed tree.
    postorder(0,len(post)-1)
"hjson817@gmail.com"
] | hjson817@gmail.com |
4cd55693bc93d9e19bd3ab13a30a7a91bac1d33b | 161d7836e73fba496838c59ce7ee94bf685fb696 | /_unittests/ut_mokadi/test_speech_system.py | 30263604f1b1059df1b3c977cb98899e92b1e9f4 | [
"MIT"
] | permissive | sdpython/botadi | 80b985f21bdab5f917316348ed5f5cf9aa053c40 | 5e5464824a9c446ac567031245603205848558d3 | refs/heads/master | 2022-06-15T00:06:17.801753 | 2022-06-13T11:44:21 | 2022-06-13T11:44:21 | 163,576,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | # -*- coding: utf-8 -*-
"""
@brief test log(time=10s)
"""
import os
import unittest
import warnings
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import is_travis_or_appveyor, add_missing_development_version, ExtTestCase
class TestSpeechSystem(ExtTestCase):
    """Unit test for the vocal recognition helper (currently disabled)."""
    def setUp(self):
        # Make sure sibling development packages are importable.
        add_missing_development_version(["jyquickhelper", "pymmails"],
                                        __file__, hide=True)
    def test_speech_system(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        if is_travis_or_appveyor():
            # no keys
            return
        # bugged
        # NOTE(review): the test is intentionally short-circuited below
        # because vocal_recognition_system hangs on wav input; the code
        # after `return` is kept for when the upstream bug is fixed.
        warnings.warn(
            "vocal_recognition_system does not return for a wav file.")
        return
        wav = os.path.join(os.path.abspath(
            os.path.dirname(__file__)), "data", "output.wav")
        with open(wav, "rb") as f:
            content = f.read()
        self.assertNotEmpty(content)
        # from ensae_teaching_cs.cspython import vocal_recognition_system
        # fLOG("start recognition")
        # res = vocal_recognition_system(content)
        # fLOG("end recognition")
        # fLOG(res)
        # self.assertTrue(isinstance(res, tuple))
if __name__ == "__main__":
    unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
0d02876a6c11f287264f8d73c8660e9984834f4b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_notch.py | dd803a71f31f6a3836bc904e8c2875a9eca60c60 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py |
#calss header
class _NOTCH():
def __init__(self,):
self.name = "NOTCH"
self.definitions = [u'to cut a notch in something']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
447d838e87100cdb48f71811397a7365383257b2 | 5588ea35c23e9d2a1a6ff0d5400d83b5b6c9bead | /optbinning/binning/binning_information.py | 5fea471d46df13df8807fd723d7634f43cf383c8 | [
"Apache-2.0"
] | permissive | mnjenga2/optbinning | 378b97bc0c10a96399ce22d9f11bc21bf8f9533f | 25af0722e1bdf6ebc68cfc6f0ce0156ac9b2bcd8 | refs/heads/master | 2022-11-17T10:24:59.622236 | 2020-07-19T15:55:23 | 2020-07-19T15:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,685 | py | """
Optimal binning information.
"""
# Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
# Copyright (C) 2019
import numpy as np
from sklearn.base import BaseEstimator
from .options import continuous_optimal_binning_default_options
from .options import multiclass_optimal_binning_default_options
from .options import optimal_binning_default_options
from .options import sboptimal_binning_default_options
try:
from localsolver import LSStatistics
LOCALSOLVER_AVAILABLE = True
except ImportError:
LOCALSOLVER_AVAILABLE = False
def print_header():
    """Print the optbinning version/copyright banner."""
    print("optbinning (Version 0.7.0)\n"
          "Copyright (c) 2019-2020 Guillermo Navas-Palencia, Apache License 2.0"
          "\n")
def print_optional_parameters(dict_default_options, dict_user_options):
    """Print the options table, flagging each entry as default ('d') or
    user-overridden ('U').

    None values print as "no"; container and estimator values print as
    "yes"; plain scalars are printed verbatim.
    """
    option_format = "    {:<24} {:>15}   * {}\n"
    str_options = "  Begin options\n"
    for key, value in dict_default_options.items():
        user_value = dict_user_options[key]
        # 'd' = user kept the default, 'U' = user supplied a new value.
        user_flag = "d" if value == user_value else "U"
        if user_value is None:
            user_value = "no"
        elif isinstance(user_value, (list, np.ndarray, dict)):
            user_value = "yes"
        elif isinstance(user_value, BaseEstimator):
            user_value = "yes"
        str_options += option_format.format(key, str(user_value), user_flag)
    str_options += "  End options\n"
    print(str_options)
def print_prebinning_statistics(n_prebins, n_refinement):
    """Print the number of pre-bins and refinement iterations."""
    report = (f"  Pre-binning statistics\n"
              f"    Number of pre-bins            {n_prebins:>10}\n"
              f"    Number of refinements         {n_refinement:>10}\n")
    print(report)
def print_solver_statistics(solver_type, solver):
    """Print solver statistics for a solved model.

    Supported values of ``solver_type``: "cp" (OR-Tools CP-SAT), "mip"
    (OR-Tools linear solver) and "ls" (LocalSolver, optional dependency).
    ``solver`` is the corresponding solver instance.
    """
    if solver_type == "cp":
        n_booleans = solver.NumBooleans()
        n_branches = solver.NumBranches()
        n_conflicts = solver.NumConflicts()
        objective = int(solver.ObjectiveValue())
        best_objective_bound = int(solver.BestObjectiveBound())
        solver_stats = (
            "  Solver statistics\n"
            "    Type                          {:>10}\n"
            "    Number of booleans            {:>10}\n"
            "    Number of branches            {:>10}\n"
            "    Number of conflicts           {:>10}\n"
            "    Objective value               {:>10}\n"
            "    Best objective bound          {:>10}\n"
        ).format(solver_type, n_booleans, n_branches, n_conflicts,
                 objective, best_objective_bound)
    elif solver_type == "mip":
        n_constraints = solver.NumConstraints()
        n_variables = solver.NumVariables()
        objective = solver.Objective().Value()
        best_bound = solver.Objective().BestBound()
        solver_stats = (
            "  Solver statistics\n"
            "    Type                          {:>10}\n"
            "    Number of variables           {:>10}\n"
            "    Number of constraints         {:>10}\n"
            "    Objective value               {:>10.4f}\n"
            "    Best objective bound          {:>10.4f}\n"
        ).format(solver_type, n_variables, n_constraints, objective,
                 best_bound)
    elif solver_type == "ls":
        # LocalSolver is an optional commercial dependency; fail loudly if
        # it was requested but could not be imported at module load time.
        if not LOCALSOLVER_AVAILABLE:
            raise ImportError('Cannot import localsolver. Install LocalSolver '
                              'or choose another solver, options are "cp" and '
                              '"mip".')
        n_iterations = LSStatistics.get_nb_iterations(solver.statistics)
        solver_stats = (
            "  Solver statistics\n"
            "    Type                          {:>10}\n"
            "    Number of iterations          {:>10}\n"
        ).format(solver_type, n_iterations)
    print(solver_stats)
def print_timing(solver_type, solver, time_total, time_preprocessing,
                 time_prebinning, time_solver, time_postprocessing):
    """Print a timing breakdown of the binning run.

    Each phase is shown in seconds with its share of ``time_total``.
    NOTE(review): assumes time_total > 0 (and time_solver > 0 for the CP
    branch); a zero total would raise ZeroDivisionError.
    """
    p_preprocessing = time_preprocessing / time_total
    p_prebinning = time_prebinning / time_total
    p_solver = time_solver / time_total
    p_postprocessing = time_postprocessing / time_total
    if solver_type == "cp" and solver is not None:
        # CP-SAT reports its own wall time, so the solver phase can be
        # further split into model generation vs. actual optimization.
        time_optimizer = solver.WallTime()
        time_model_generation = time_solver - time_optimizer
        p_model_generation = time_model_generation / time_solver
        p_optimizer = time_optimizer / time_solver
        time_stats = (
            "  Timing\n"
            "    Total time            {:>18.2f} sec\n"
            "    Pre-processing        {:>18.2f} sec   ({:>7.2%})\n"
            "    Pre-binning           {:>18.2f} sec   ({:>7.2%})\n"
            "    Solver                {:>18.2f} sec   ({:>7.2%})\n"
            "      model generation    {:>18.2f} sec   ({:>7.2%})\n"
            "      optimizer           {:>18.2f} sec   ({:>7.2%})\n"
            "    Post-processing       {:>18.2f} sec   ({:>7.2%})\n"
        ).format(time_total, time_preprocessing, p_preprocessing,
                 time_prebinning, p_prebinning, time_solver, p_solver,
                 time_model_generation, p_model_generation, time_optimizer,
                 p_optimizer, time_postprocessing, p_postprocessing)
    else:
        time_stats = (
            "  Timing\n"
            "    Total time            {:>18.2f} sec\n"
            "    Pre-processing        {:>18.2f} sec   ({:>7.2%})\n"
            "    Pre-binning           {:>18.2f} sec   ({:>7.2%})\n"
            "    Solver                {:>18.2f} sec   ({:>7.2%})\n"
            "    Post-processing       {:>18.2f} sec   ({:>7.2%})\n"
        ).format(time_total, time_preprocessing, p_preprocessing,
                 time_prebinning, p_prebinning, time_solver, p_solver,
                 time_postprocessing, p_postprocessing)
    print(time_stats)
def print_name_status(name, status):
    """Print the model name (falling back to UNKNOWN) and solver status."""
    display_name = name if name else "UNKNOWN"
    print("  Name    : {:<32}\n"
          "  Status  : {:<32}\n".format(display_name, status))
def print_main_info(name, status, time_total):
    """Print a short summary: name, status and rounded total time."""
    print_name_status(name, status)
    elapsed = round(time_total, 4)
    print("  Time    : {:<7.4f} sec\n".format(elapsed))
def print_binning_information(binning_type, print_level, name, status,
                              solver_type, solver, time_total,
                              time_preprocessing, time_prebinning, time_solver,
                              time_postprocessing, n_prebins, n_refinements,
                              dict_user_options):
    """Top-level reporting entry point for a finished binning run.

    print_level: 0 = name/status/time only, 1 = + pre-binning, solver and
    timing sections, 2 = additionally dump the full options table.
    """
    print_header()
    if print_level == 2:
        # Pick the default-options table matching the binning variant so
        # user overrides can be flagged in the printed table.
        if binning_type == "optimalbinning":
            dict_default_options = optimal_binning_default_options
        elif binning_type == "multiclassoptimalbinning":
            dict_default_options = multiclass_optimal_binning_default_options
        elif binning_type == "continuousoptimalbinning":
            dict_default_options = continuous_optimal_binning_default_options
        elif binning_type == "sboptimalbinning":
            dict_default_options = sboptimal_binning_default_options
        print_optional_parameters(dict_default_options, dict_user_options)
    if print_level == 0:
        print_main_info(name, status, time_total)
    elif print_level >= 1:
        print_name_status(name, status)
        print_prebinning_statistics(n_prebins, n_refinements)
        # Solver/timing details only make sense for solved models.
        if status in ("OPTIMAL", "FEASIBLE"):
            if solver is not None:
                print_solver_statistics(solver_type, solver)
            print_timing(solver_type, solver, time_total, time_preprocessing,
                         time_prebinning, time_solver, time_postprocessing)
| [
"g.navas.palencia@gmail.com"
] | g.navas.palencia@gmail.com |
331e12f77b1298ca687aa8abf1b06e8b53670ca8 | 40be08bbfed4bd6a951c18cc4bc0bf1f00e7e8a6 | /lib/systems/d-tyrosine.py | b8c0453bbd0d2565282bb8aa5af87923778961db | [
"BSD-3-Clause"
] | permissive | pulsar-chem/Pulsar-Core | 5bf4239c0a0de74d3f12a1c8b9bea2867fd8960c | f8e64e04fdb01947708f098e833600c459c2ff0e | refs/heads/master | 2021-01-18T06:51:05.905464 | 2017-06-04T02:31:44 | 2017-06-04T02:31:44 | 46,251,809 | 0 | 2 | null | 2017-05-25T14:59:51 | 2015-11-16T04:21:59 | C++ | UTF-8 | Python | false | false | 1,324 | py | import pulsar as psr
def load_ref_system():
    """ Returns d-tyrosine as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol

        The literal below is an XYZ-style geometry (element symbol then
        x, y, z coordinates, presumably in Angstrom) parsed by pulsar's
        make_system.
    """
    return psr.make_system("""
        C     2.8922    -0.8221    -1.1174
        C     1.4285    -0.8328    -0.6961
        C     1.0097     0.6023    -0.3298
        H     1.7288     1.0195     0.4035
        H     1.0702     1.2635    -1.2191
        C    -0.3639     0.6346     0.2566
        C    -1.4389     1.1258    -0.4880
        H    -1.2751     1.4887    -1.5098
        C    -2.7154     1.1673     0.0543
        H    -3.5569     1.5535    -0.5321
        C    -2.9150     0.7096     1.3642
        C    -1.8442     0.2179     2.1232
        H    -1.9966    -0.1379     3.1486
        C    -0.5751     0.1861     1.5616
        H     0.2696    -0.1942     2.1482
        O    -4.1959     0.7766     1.8369
        H    -4.1980     0.4421     2.7252
        N     0.6337    -1.4483    -1.7847
        O     3.7365    -1.4352    -0.2570
        O     3.4022    -0.3272    -2.1058
        H     1.2939    -1.4919     0.2002
        H     0.7306    -0.9184    -2.6259
        H    -0.3269    -1.4625    -1.5117
        H     4.6271    -1.4007    -0.5923
        """)
| [
"noreply@github.com"
] | pulsar-chem.noreply@github.com |
e44b5cc61b8f1316e7e39504e69b3d259b1fb826 | 61673ab9a42f7151de7337608c442fa6247f13bb | /turtle/hexagone/main.py | 79ce05588cb9650916614442edd18f018a6a02b6 | [
"MIT"
] | permissive | furas/python-examples | 22d101670ecd667a29376d7c7d7d86f8ec71f6cf | 95cb53b664f312e0830f010c0c96be94d4a4db90 | refs/heads/master | 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 | MIT | 2021-02-17T23:33:37 | 2015-11-04T23:54:32 | Python | UTF-8 | Python | false | false | 1,175 | py | import turtle
from math import pi, sin, cos
def hexagone(point, longueur,c):
    """Draw a hexagon centred at `point` with circumradius `longueur`,
    filled as three rhombi using the three colours in tuple `c`.

    The drawing is order-dependent (pen up/down, fill begin/end), so the
    statement sequence must not be reordered.
    """
    l = longueur
    x, y = point
    turtle.up()
    turtle.goto(point)
    turtle.color(c[0]) #black
    turtle.down()
    # First rhombus: vertices at angles 4pi/3, 5pi/3 and 0 around centre.
    turtle.begin_fill()
    turtle.goto(l * cos(4 / 3 * pi )+x, l * sin(4 / 3 * pi)+y)
    turtle.goto(l * cos(5 / 3 * pi)+x, l * sin(5 / 3 * pi)+y)
    turtle.goto(l * cos(0)+x, l * sin(0)+y)
    turtle.goto(point)
    turtle.end_fill()
    turtle.color(c[1]) #blue
    # Second rhombus: angles 0, pi/3 and 2pi/3.
    turtle.begin_fill()
    turtle.goto(l * cos(0)+x, l * sin(0)+y)
    turtle.goto(l * cos(pi / 3)+x, l * sin(pi / 3)+y)
    turtle.goto(l * cos(pi * 2 / 3)+x, l * sin(pi * 2 / 3)+y)
    turtle.goto(point)
    turtle.end_fill()
    turtle.color(c[2]) #red
    # Third rhombus: angles 2pi/3, pi (the point (-l+x, y)) and 4pi/3.
    turtle.begin_fill()
    turtle.goto(l * cos(pi * 2 / 3)+x, l * sin(pi * 2 / 3)+y)
    turtle.goto(-l+x, 0+y)
    turtle.goto(l * cos(4 / 3 * pi)+x, l * sin(4 / 3 * pi)+y)
    turtle.goto(point)
    turtle.end_fill()
    turtle.up()
    return True
# Demo: draw four tri-coloured hexagons on a 2x2 grid, then keep the
# turtle window open until it is closed by the user.
hexagone((0,0), 50, ("black",("blue"),("red")))
hexagone((100,0), 50, ("black",("blue"),("red")))
hexagone((0,100), 50, ("black",("blue"),("red")))
hexagone((100,100), 50, ("black",("blue"),("red")))
turtle.done()
| [
"furas@tlen.pl"
] | furas@tlen.pl |
1a3e08e306facde599652aa55be243b8556bfc6d | 890d2361bcc185a65af1f1089fe594ce93a771c4 | /answers_100+_programs/tmp.py | bf7fb0dbe7b4d0949b23fbbadd4d9cf290527f9d | [] | no_license | panu2306/Python-programming-exercises | 307b255209233f95ac2b205cb063b56c303fe67d | a4df9c89d2cb07bbfb16d23be081efa55d738814 | refs/heads/master | 2023-01-22T18:12:56.047582 | 2020-12-05T11:21:21 | 2020-12-05T11:21:21 | 255,521,445 | 0 | 0 | null | 2020-04-14T05:48:30 | 2020-04-14T05:48:29 | null | UTF-8 | Python | false | false | 138 | py | import re
# Demo of re.search with ^/$ anchors: the pattern must match the whole line.
text = "The rain in Spain"
match = re.search("^The.*Spain$", text)
print("YES! We have a match!" if match else "No match")
| [
"pranavbhendawade@gmail.com"
] | pranavbhendawade@gmail.com |
d2e24dd5b76387fd4620ae86e797d5f2a4eeef1c | d5b48163d236ca770be8e687f92192e2971397e8 | /keysdict.py | 18963f29912462eeee5e2e06c2daef6dc14d3846 | [] | no_license | Kunal352000/python_program | 191f5d9c82980eb706e11457c2b5af54b0d2ae95 | 7a1c645f9eab87cc45a593955dcb61b35e2ce434 | refs/heads/main | 2023-07-12T19:06:19.121741 | 2021-08-21T11:58:41 | 2021-08-21T11:58:41 | 376,606,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | x={'a':39,'b':78,'c':65,'11':23,'12':45,13:40}
# Demo of dict.keys(): print the mapping, its keys view and the view's
# type, then iterate the keys on one line and show the last key's type.
print(x)
keys_view = x.keys()
print(keys_view)
print(type(keys_view))
for key in x.keys():
    print(key, end=" ")
print()
print(type(key))
| [
"noreply@github.com"
] | Kunal352000.noreply@github.com |
16b649016334f76c35494556505fbc781db4a3fb | d8008fdbfab54e36167747e8bb5ed639935a7d28 | /BigProj/Chatissimo/app.py | fe12cb4c6ed6e0b254791d9f56da90f9a6cc914f | [] | no_license | dancb10/ppscu.com | 90cce23496eaf97b0212988b23138d13046dab3b | cf1e28b41dcd6048cf2236f081891360f2741d03 | refs/heads/master | 2022-12-09T05:30:58.328023 | 2021-01-07T12:48:06 | 2021-01-07T12:48:06 | 63,581,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from flask import Flask, render_template
from flask_socketio import SocketIO
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
from flask_socketio import join_room, leave_room
from flask_socketio import send, emit
@socketio.on('my event')
def handle_my_custom_event(data):
    # Re-broadcast the payload of 'my event' to every connected client.
    emit('my response', data, broadcast=True)
@socketio.on('message')
def handle_message(message):
    # Echo plain (unnamed) messages back on the '/chat' namespace.
    send(message, namespace='/chat')
@socketio.on('join')
def on_join(data):
    # Expects a dict with 'username' and 'room' keys.
    username = data['username']
    room = data['room']
    join_room(room)
    # Announce the arrival to everyone in that room.
    send(username + ' has entered the room.', room=room)
@socketio.on('leave')
def on_leave(data):
    # Expects a dict with 'username' and 'room' keys.
    username = data['username']
    room = data['room']
    leave_room(room)
    # Announce the departure to everyone remaining in that room.
    send(username + ' has left the room.', room=room)
if __name__ == '__main__':
    # 0.0.0.0 exposes the development server on all network interfaces.
    socketio.run(app, host='0.0.0.0')
| [
"dapopesc@Dans-MacBook-Pro.local"
] | dapopesc@Dans-MacBook-Pro.local |
d22bb51e93002a52c6bd583ccc04d99e47130c60 | e3d09f5467a29e457048e8caccdce08b01387c8a | /tests/test_catalog.py | dd5e4a2f6cd9c8d3698b3ac541d4f557aa59087f | [
"MIT"
] | permissive | sajabdoli/jschon | 958181d24bbdc440725274067c6038f60ecaea1a | 63a602745c825abce3851207bd37372e0ce4452d | refs/heads/main | 2023-08-03T09:07:55.700537 | 2021-09-20T06:13:15 | 2021-09-20T06:13:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,332 | py | import json
import pathlib
import tempfile
import pytest
from jschon import Catalog, CatalogError, URI, JSONPointer, JSONSchema, JSON, create_catalog
from tests import example_schema, metaschema_uri_2020_12
# Shared JSON payload written to disk by the setup_tmpdir fixture.
json_example = {"foo": "bar"}
@pytest.fixture
def new_catalog():
    # A fresh, empty catalog (not the process-wide default one).
    return Catalog(default=False)
def test_new_catalog(new_catalog):
    # A brand-new catalog starts with every internal registry empty.
    assert not new_catalog._directories
    assert not new_catalog._vocabularies
    assert not new_catalog._format_validators
    assert not new_catalog._schema_cache
@pytest.fixture
def setup_tmpdir():
    """Create a temp dir hierarchy containing a JSON file.
    Yield (tmpdir path, subdir name, file name) and clean up
    afterwards.
    """
    with tempfile.TemporaryDirectory() as tmpdir_path:
        with tempfile.TemporaryDirectory(dir=tmpdir_path) as subdir_path:
            with tempfile.NamedTemporaryFile(dir=subdir_path) as f:
                f.write(json.dumps(json_example).encode())
                f.flush()
                yield tmpdir_path, pathlib.Path(subdir_path).name, pathlib.Path(f.name).name
@pytest.mark.parametrize('base_uri', [
    'http://example.com/',
    'http://example.com/foo/',
    'http://example.com/foo/bar/',
])
def test_add_directory_and_load_json(base_uri, setup_tmpdir, new_catalog):
    # Mapping a base URI to a directory makes files under it loadable.
    tmpdir_path, subdir_name, jsonfile_name = setup_tmpdir
    new_catalog.add_directory(URI(base_uri), pathlib.Path(tmpdir_path))
    json_doc = new_catalog.load_json(URI(f'{base_uri}{subdir_name}/{jsonfile_name}'))
    assert json_doc == json_example
    # incorrect base URI
    with pytest.raises(CatalogError):
        new_catalog.load_json(URI(f'http://example.net/{subdir_name}/{jsonfile_name}'))
    # incorrect file name
    with pytest.raises(CatalogError):
        new_catalog.load_json(URI(f'{base_uri}{subdir_name}/baz'))
@pytest.mark.parametrize('base_uri', [
    '//example.com/foo/bar/', # no scheme
    'http://Example.com/foo/bar/', # not normalized
    'http://example.com/foo/#', # contains empty fragment
    'http://example.com/foo/#bar', # contains non-empty fragment
    'http://example.com/foo/bar', # does not end with '/'
])
def test_add_directory_invalid_uri(base_uri, setup_tmpdir, new_catalog):
    # Malformed base URIs are rejected when registering a directory.
    tmpdir_path, subdir_name, jsonfile_name = setup_tmpdir
    with pytest.raises(CatalogError):
        new_catalog.add_directory(URI(base_uri), pathlib.Path(tmpdir_path))
def test_add_directory_invalid_dir(setup_tmpdir, new_catalog):
    # The base directory must exist and be a directory.
    tmpdir_path, subdir_name, jsonfile_name = setup_tmpdir
    # base_dir is a file
    with pytest.raises(CatalogError):
        new_catalog.add_directory(URI('http://example.com/'), pathlib.Path(tmpdir_path) / subdir_name / jsonfile_name)
    # base_dir does not exist
    with pytest.raises(CatalogError):
        new_catalog.add_directory(URI('http://example.com/'), pathlib.Path(tmpdir_path) / 'foo')
@pytest.mark.parametrize('uri', [
    '//example.com/foo/bar/file.json', # no scheme
    'http://Example.com/foo/bar/file.json', # not normalized
    'http://example.com/foo/file.json#', # contains empty fragment
    'http://example.com/foo/file.json#bar', # contains non-empty fragment
])
def test_load_json_invalid_uri(uri, new_catalog):
    # Malformed document URIs are rejected by load_json.
    with pytest.raises(CatalogError):
        new_catalog.load_json(URI(uri))
@pytest.mark.parametrize('uri, is_known', [
    ("https://json-schema.org/draft/2020-12/vocab/core", True),
    ("https://json-schema.org/draft/2020-12/vocab/applicator", True),
    ("https://json-schema.org/draft/2020-12/vocab/unevaluated", True),
    ("https://json-schema.org/draft/2020-12/vocab/validation", True),
    ("https://json-schema.org/draft/2020-12/vocab/meta-data", True),
    ("https://json-schema.org/draft/2020-12/vocab/format-annotation", True),
    ("https://json-schema.org/draft/2020-12/meta/format-assertion", False),
    ("https://json-schema.org/draft/2020-12/vocab/content", True),
])
def test_get_vocabulary(uri, is_known, catalog):
    # Known 2020-12 vocabularies resolve; unknown URIs raise CatalogError.
    if is_known:
        vocabulary = catalog.get_vocabulary(URI(uri))
        assert vocabulary.uri == uri
    else:
        with pytest.raises(CatalogError):
            catalog.get_vocabulary(URI(uri))
@pytest.fixture
def example_schema_uri():
    # Compile the shared example schema and hand its URI to tests.
    schema = JSONSchema(example_schema, metaschema_uri=metaschema_uri_2020_12)
    return schema.uri
@pytest.mark.parametrize('ptr, is_schema', [
    ("", True),
    ("/$id", False),
    ("/$defs", False),
    ("/if", True),
    ("/then", True),
    ("/else", True),
])
def test_get_schema(example_schema_uri, ptr, is_schema, catalog):
    # Only JSON-pointer fragments that land on subschemas are retrievable.
    uri = example_schema_uri.copy(fragment=ptr)
    if is_schema:
        subschema = catalog.get_schema(uri)
        assert JSONPointer(ptr).evaluate(example_schema) == subschema
    else:
        with pytest.raises(CatalogError):
            catalog.get_schema(uri)
def sessioned_schema(uri, schema, session):
    # Helper: build a JSONSchema, optionally bound to a named session.
    kwargs = {'uri': uri, 'metaschema_uri': metaschema_uri_2020_12}
    if session is not None:
        kwargs['session'] = session
    return JSONSchema(schema, **kwargs)
def test_session_independence(catalog):
    # The same URI may map to different schemas in different sessions.
    uri = URI("http://example.com")
    sessioned_schema(uri, {"const": 0}, None) # 'default' session
    sessioned_schema(uri, {"const": 1}, 'one')
    sessioned_schema(uri, {"const": 2}, 'two')
    assert catalog.get_schema(uri)["const"] == 0
    assert catalog.get_schema(uri, session='default')["const"] == 0
    assert catalog.get_schema(uri, session='one')["const"] == 1
    assert catalog.get_schema(uri, session='two')["const"] == 2
def test_metaschema_isolation():
    # Overriding a metaschema inside one session must not leak to others.
    new_catalog = create_catalog('2019-09', '2020-12')
    assert new_catalog._schema_cache.keys() == {'__meta__'}
    # mask the metaschema with a boolean false schema, in the fubar session
    sessioned_schema(metaschema_uri_2020_12, False, 'fubar')
    uri = URI("http://example.com")
    fubar_schema = sessioned_schema(uri, {"$ref": str(metaschema_uri_2020_12)}, 'fubar')
    assert fubar_schema.evaluate(JSON(True)).valid is False
    # masking the metaschema has no impact on other sessions
    okay_schema = sessioned_schema(uri, {"$ref": str(metaschema_uri_2020_12)}, 'okay')
    assert okay_schema.evaluate(JSON(True)).valid is True
    okay_schema = sessioned_schema(uri, {"$ref": str(metaschema_uri_2020_12)}, None)
    assert okay_schema.evaluate(JSON(True)).valid is True
| [
"52427991+marksparkza@users.noreply.github.com"
] | 52427991+marksparkza@users.noreply.github.com |
8562715e27a81c5afaaa0de22707df58099f3ac3 | b5b117371b463ba68be14345549f16098bb311ef | /curso_em_video/mundo_01/desafios/usando_modulos_do_python/ex021.py | d96113d902bbefc8cd4de46c65df64d70bb01bf3 | [] | no_license | thuurzz/Python | f1d0f5038ed97fbf4dc83c352102efcdde25ace8 | 7bd61180fe7594aad7d6cb787772a384f18ced87 | refs/heads/master | 2022-11-05T17:22:02.661665 | 2021-05-17T02:59:37 | 2021-05-17T02:59:37 | 245,733,534 | 0 | 1 | null | 2022-10-23T12:20:43 | 2020-03-08T01:34:31 | Python | UTF-8 | Python | false | false | 142 | py | #tocando MP3 com a lib pygame
import pygame
# Initialise all pygame modules, then start playing the MP3 asynchronously.
pygame.init()
pygame.mixer_music.load('ex021.mp3')
pygame.mixer_music.play()
# Block until a pygame event arrives so the script does not exit while
# the music is still playing.
pygame.event.wait()
| [
"arthur.silva@aluno.faculdadeimpacta.com.br"
] | arthur.silva@aluno.faculdadeimpacta.com.br |
0ec455911c1ab290253082808da2e25622d4c158 | c7770d7631f2930cce80462f9c3ee7e2abe118bb | /src/muses/collection/models/period.py | 5bbb6ea5691c3dc24ad1739995e67d4f71ed7624 | [
"Apache-2.0"
] | permissive | Aincient/cleo | 4f277520a22792aa5b505601849a7ff3a4bd4196 | 933ef372fa7847d943206d72bfb03c201dbafbd6 | refs/heads/master | 2021-06-18T11:01:49.137359 | 2021-01-12T16:34:44 | 2021-01-12T16:34:44 | 150,566,366 | 0 | 3 | NOASSERTION | 2021-01-12T16:34:46 | 2018-09-27T10:00:20 | Python | UTF-8 | Python | false | false | 1,416 | py | from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from six import python_2_unicode_compatible
__all__ = (
'Period',
)
@python_2_unicode_compatible
class Period(MPTTModel):
    """Historical period, stored as an MPTT tree (periods may nest)."""
    # English name; required and unique across all periods.
    name_en = models.TextField(
        verbose_name=_("English name"),
        unique=True
    )
    # Optional Dutch translation of the name.
    name_nl = models.TextField(
        verbose_name=_("Dutch name"),
        blank=True,
        null=True,
        unique=False,
    )
    # Parent period in the tree; NULL for top-level periods.
    parent = TreeForeignKey(
        'self',
        null=True,
        blank=True,
        related_name='children',
        db_index=True
    )
    # Free-text begin/end dates (CharField: values like "c. 3000 BC").
    date_begin_en = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        verbose_name=_("Date begin (EN)"),
    )
    date_end_en = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        verbose_name=_("Date end (EN)"),
    )
    class MPTTMeta(object):
        # Siblings are kept sorted by their English name.
        order_insertion_by = ['name_en']
    def date_range(self):
        """Get a string of the date range of a period, if available
        :return: "<begin> until <end>", or None when either date is missing.
        :rtype: str
        """
        if self.date_begin_en and self.date_end_en:
            return "{} until {}".format(self.date_begin_en, self.date_end_en)
    def __str__(self):
        return self.name_en
| [
"artur.barseghyan@gmail.com"
] | artur.barseghyan@gmail.com |
8a22f0fb8aa3a956133ff15591c5281360269bd6 | 38da8edb2102ad29eda8784cbb845cac0b96bbca | /176_deco_with_arg.py | da2dd44c77ee84d3fdec4a0a4fd76065eb523c25 | [] | no_license | Prateek2201/Python_codes | 1a655a3e6820e7ecb1fb8a8abd266a8ae0508cb5 | 436a36544edac80cbe420c7b9ddb718df46b68da | refs/heads/main | 2023-08-01T03:10:51.864186 | 2021-09-17T18:08:40 | 2021-09-17T18:08:40 | 407,635,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | from functools import wraps
def only_datatype_allow(data_type):
    """Decorator factory: run the wrapped function only when every
    positional argument is exactly of type `data_type`.

    On success the wrapped function's result is returned; otherwise the
    sentinel string 'Invalid arguments!' is returned instead.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Strict type(arg) == data_type check (not isinstance), so
            # subclasses are rejected as well — matching the original intent.
            if all(type(arg) == data_type for arg in args):
                return func(*args, **kwargs)
            # Fixed typo in the sentinel ('argsuments' -> 'arguments').
            return 'Invalid arguments!'
        return wrapper
    return decorator
@only_datatype_allow(str)
def string_join(*args):
    """Concatenate the given strings, appending a space after each one
    (so the result carries a trailing space; empty input yields '')."""
    return ''.join(word + ' ' for word in args)
# Demo: prints "prateek kumar agrahari " (note the trailing space).
print(string_join('prateek','kumar','agrahari'))
| [
"noreply@github.com"
] | Prateek2201.noreply@github.com |
e88f6ac435a26acbd94c78dc15bacb75b8f7c55d | 2da02bd20ae4d621ef02d557ebb7ce20dd5482ff | /clitooltester/test_runner.py | 49eb485cbdbfadddaba8d177dcbb980222cab75f | [
"Apache-2.0"
] | permissive | dfirlabs/clitooltester | 54544eddbe3ec8d3b86a6a6846faa2fadcfc3e37 | ffe23b7b7458212d150390f476cda74e89fc97e1 | refs/heads/main | 2021-11-23T21:44:03.777813 | 2021-10-31T13:04:31 | 2021-10-31T13:04:31 | 228,607,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # -*- coding: utf-8 -*-
"""Command line tool test runner."""
from __future__ import unicode_literals
class TestRunner(object):
  """Command line tool test runner."""
  def ReadConfiguration(self, path):
    """Reads the configuration from a file.
    Args:
      path (str): path of the configuration file.
    Note:
      Not implemented yet — the docstring is the entire method body, so
      calling this is currently a no-op returning None.
    """
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
168cf8df7410467e2ce9e176451a4dd10705ab49 | fb5dd7410679bd28299cfe3841de6fe826d978cb | /src/user/migrations/0002_auto_20201207_1825.py | 9f881775e4d02973c4bc130028dbacda53f05e12 | [] | no_license | IvanYukish/finance-manager | 35202fde63a7f519b52d8e09f3f64dd547cccbc5 | 9147d09cff7543361f5ccefa79ec334a58efc9a1 | refs/heads/master | 2023-07-11T14:39:17.536557 | 2021-08-04T23:05:45 | 2021-08-04T23:05:45 | 317,544,811 | 1 | 0 | null | 2021-08-23T17:18:10 | 2020-12-01T13:09:50 | CSS | UTF-8 | Python | false | false | 546 | py | # Generated by Django 3.1.3 on 2020-12-07 18:25
from django.db import migrations, models
import user.validators
class Migration(migrations.Migration):
    # Auto-generated Django migration: attaches a custom phone-number
    # validator and extra metadata to CustomUser.phone_number. Do not
    # hand-edit migrations that have already been applied.
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='phone_number',
            # verbose_name is Ukrainian for "Phone number".
            field=models.CharField(blank=True, db_index=True, max_length=20, null=True, validators=[user.validators.CustomPhoneNumberValidator()], verbose_name='Номер Телефону'),
        ),
    ]
| [
"iwan.jukisch@gmail.com"
] | iwan.jukisch@gmail.com |
06d9b8ff55ed2fbb76bfbdcb87a4babf0d2bacd2 | 0874abd0a592c952a7aad6f4642776168312aee6 | /12-函数/02-注意事项.py | eb39ef7339f010e72b295b58b4eb1f0d5f23fb96 | [] | no_license | EndIFbiu/python-study | 075742d3923adad8061b5f720cabd4a33d3eb0a2 | 62a64a587077ef5f2dcd8a119ba56d3709073bf6 | refs/heads/master | 2023-02-10T08:14:08.144442 | 2020-12-27T12:23:14 | 2020-12-27T12:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # 1.使用一个函数 2.测试注意事项
# Define the function
def info_print():
    print('hello world')
# Call the function
info_print()
# English gloss of the (Chinese) notes below:
# 1. Define before calling.
# 2. A function's body does not run until the function is called.
# 3. Execution flow: on a call, the interpreter jumps into the function
#    body, runs it, then resumes after the call site.
"""
1.先定义后调用
2.没有调用函数,函数代码不会执行
3.函数的执行流程:
    当调用函数的时候,解释器会回到定义函数的下方缩进代码执行
    执行完后回到调用函数的地方继续向下执行
"""
| [
"270017772@qq.com"
] | 270017772@qq.com |
2777dbd194a8ef3326bbcab1b6100c10510741bb | 43e5441f74359d620be6f7f80c99622769ea9774 | /venv/Lib/site-packages/tb_paddle/file_writer.py | 7140441ed49b89658d66a95feff3501103a1f992 | [] | no_license | 33Da/deeplearn_eassy | 96f1bd09fe3df907c650378215eb686e4ab2801e | 82d60c5ec3aec60822d68d13f11ef1320d0bba2e | refs/heads/master | 2023-02-07T15:02:00.202693 | 2021-01-05T05:03:22 | 2021-01-05T05:03:22 | 326,892,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,950 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from .event_file_writer import EventFileWriter
from .proto import event_pb2
class FileWriter(object):
    """Writes protocol buffers to event files to be consumed by TensorBoard.
    The `FileWriter` class provides a mechanism to create an event file in a
    given directory and add summaries and events to it. The class updates the
    file contents asynchronously.
    """
    def __init__(self, logdir, max_queue=1024, filename_suffix=''):
        """Creates a `FileWriter` and an event file.
        On construction the writer creates a new event file in `logdir`.
        The other arguments to the constructor control the asynchronous writes to
        the event file.
        :param logdir: Directory where event file will be written.
        :type logdir: str
        :param max_queue: Size of the queue for pending events and
            summaries before one of the 'add' calls forces a flush to disk.
        :type max_queue: int
        :param filename_suffix: Suffix added to all event filenames in the logdir directory.
            More details on filename construction in
            tensorboard.summary.writer.event_file_writer.EventFileWriter.
        :type filename_suffix: str
        """
        self.logdir = str(logdir)
        # The event writer performs the actual (asynchronous) file I/O.
        self.event_writer = EventFileWriter(self.logdir, max_queue, filename_suffix)
    def get_logdir(self):
        """Returns the directory where event file will be written."""
        return self.logdir
    def add_event(self, event, step=None, walltime=None):
        """Adds an event to the event file.
        :param event: An `Event` protocol buffer.
        :param step: Optional global step value for training process to record with the event.
        :type step: Number
        :param walltime: Given time to override the default walltime.
        :type walltime: Optional, float
        """
        event.wall_time = time.time() if walltime is None else walltime
        if step is not None:
            # Make sure step is converted from numpy or other formats
            # since protobuf might not convert depending on version
            event.step = int(step)
        self.event_writer.add_event(event)
    def add_summary(self, summary, global_step=None, walltime=None):
        """Adds a `Summary` protocol buffer to the event file.
        This method wraps the provided summary in an `Event` protocol buffer
        and adds it to the event file.
        :param summary: A `Summary` protocol buffer.
        :param global_step: Optional global step value for training process to record with the summary.
        :type global_step: Number
        :param walltime: Given time to override the default walltime.
        :type walltime: Optional, float
        """
        event = event_pb2.Event(summary=summary)
        self.add_event(event, global_step, walltime)
    def add_graph(self, GraphDef_proto, walltime=None):
        """Adds a `GraphDef` protocol buffer to the event file.
        :param GraphDef_proto: A GraphDef protocol buffer.
        :param walltime: Optional walltime to override default
            (current) walltime (from time.time()) seconds after epoch.
        :type walltime: Optional, float
        """
        event = event_pb2.Event(graph_def=GraphDef_proto.SerializeToString())
        self.add_event(event, None, walltime)
    def add_run_metadata(self, run_metadata, tag, global_step=None, walltime=None):
        """Adds a metadata information for a single session.run() call.
        :param run_metadata: A `RunMetadata` protobuf object.
        :param tag: The tag name for this metadata.
        :type tag: string
        :param global_step: global step counter to record with the StepStats.
        :type global_step: int
        :param walltime: Given time to override the default walltime.
        :type walltime: Optional, float
        """
        tagged_metadata = event_pb2.TaggedRunMetadata(
            tag=tag, run_metadata=run_metadata.SerializeToString())
        event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
        self.add_event(event, global_step, walltime)
    def flush(self):
        """Flushes the event file to disk.
        Call this method to make sure that all pending events have been written to disk.
        """
        self.event_writer.flush()
    def close(self):
        """Flushes the event file to disk and close the file.
        Call this method when you do not need the summary writer anymore.
        """
        self.event_writer.close()
    def reopen(self):
        """Reopens the EventFileWriter.
        Can be called after `close()` to add more events in the same directory.
        The events will go into a new events file.
        Does nothing if the EventFileWriter was not closed.
        """
        self.event_writer.reopen()
| [
"764720843@qq.com"
] | 764720843@qq.com |
aa9d2e9bb8d661e9be2db95c36fc2df05cd76db3 | c1120d1d6352f35dc988b9874b24cd30f83f2f58 | /search_submit/tests.py | 4731168e669f8ab9ac87f22768712156183b9f33 | [] | no_license | andrem122/Invoice-Management | 70032d86cfdfb2ed21479baae3a8057f88b61047 | 7f7a617a39602a656ff54724c344745038f304b4 | refs/heads/master | 2022-12-11T19:19:47.898336 | 2020-10-01T01:39:52 | 2020-10-01T01:39:52 | 120,393,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from django.test import TestCase
from .views import Search_Submit_View
from django.test import Client
class Test_Search(TestCase):
    """Tests for the search view and Search_Submit_View helpers (Django)."""
    def setUp(self):
        # A fresh test client for every test method.
        self.c = Client()
    def test_values_normalize_query(self):
        #test to see if incorrect data types are dealt with
        # normalize_query is expected to reject anything that is not a string.
        # NOTE(review): the dict literal below repeats the key 'key', so only
        # one entry survives -- confirm that was intended.
        search_submit_view = Search_Submit_View()
        self.assertRaises(ValueError, search_submit_view.normalize_query, True)
        self.assertRaises(ValueError, search_submit_view.normalize_query, 2)
        self.assertRaises(ValueError, search_submit_view.normalize_query, ['list', 'list'])
        self.assertRaises(ValueError, search_submit_view.normalize_query, {'key': 1, 'key': '1'})
    def test_post(self):
        # The search endpoint answers POSTs with a redirect (302).
        response = self.c.post('/search/', {'search': 'all'})
        self.assertEqual(response.status_code, 302)
        response = self.c.post('/search/', {'search': 'test'})
        self.assertEqual(response.status_code, 302)
    def test_get(self):
        # Plain GETs are also redirected.
        response = self.c.get('/search/')
        self.assertEqual(response.status_code, 302)
| [
"andre.mashraghi@gmail.com"
] | andre.mashraghi@gmail.com |
f73c52321076fade49523070a1e3b273c1795d7a | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/백트래킹/14888_연산자 끼워넣기(godyd2702).py | 9cd8659a763dace3fcc1d348c9bc604318b17254 | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | M = -10 ** 9
# m tracks the minimum result found so far (M, the maximum tracker, is
# defined just above; answers are bounded within +/-1e9 by the problem).
m = 10 ** 9
# N operands, then the counts of +, -, *, / operators available.
N = int(input())
num = list(map(int, input().split()))
a, b, c, d = map(int, input().split())
def inst(n, i, d1, d2, d3, d4):
    """Depth-first search over every placement of the remaining operators.

    n        -- value accumulated so far
    i        -- index of the next operand in ``num``
    d1..d4   -- remaining +, -, *, / operators
    Leaves update the global maximum ``M`` and minimum ``m``.
    """
    global M, m
    if i == N:
        if n > M:
            M = n
        if n < m:
            m = n
        return
    operand = num[i]
    if d1:
        inst(n + operand, i + 1, d1 - 1, d2, d3, d4)
    if d2:
        inst(n - operand, i + 1, d1, d2 - 1, d3, d4)
    if d3:
        inst(n * operand, i + 1, d1, d2, d3 - 1, d4)
    if d4:
        # int() truncates toward zero, matching the problem's division rule.
        inst(int(n / operand), i + 1, d1, d2, d3, d4 - 1)
# Start the search at the first operand with every operator still available,
# then report the maximum and minimum attainable results.
inst(num[0], 1, a, b, c, d)
print(M)
print(m)
| [
"phoenix9373@naver.com"
] | phoenix9373@naver.com |
0b7be87224520bb9408cd8049f7d7e65582aa728 | 4aa7a4d0525095725eb99843c83827ba4806ceb1 | /ML/m26_earlyStopping.py | d85e06cd4038a8f0760a03f63d358972ec9dcef2 | [] | no_license | seonukim/Study | 65a70f5bdfad68f643abc3086d5c7484bb2439d4 | a5f2538f9ae8b5fc93b5149dd51704e8881f0a80 | refs/heads/master | 2022-12-04T17:04:31.489771 | 2020-08-21T00:35:15 | 2020-08-21T00:35:15 | 260,144,755 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # xgboost evaluate
import numpy as np
from sklearn.feature_selection import SelectFromModel
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.datasets import load_boston
## Data
x, y = load_boston(return_X_y = True)
print(x.shape) # (506, 13)
print(y.shape) # (506,)
## train_test_split
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size = 0.2,
    shuffle = True, random_state = 66)
## Modeling
model = XGBRegressor(n_estimators = 1000, # number of verbose lines, same as epochs
                     learning_rate = 0.1)
model.fit(x_train, y_train,
          verbose = True, eval_metric = 'rmse',
          eval_set = [(x_train, y_train),
                      (x_test, y_test)],
          early_stopping_rounds = 20)
# eval_metric options: rmse, mae, logloss, error (error of 0.2 means accuracy 0.8), auc (accuracy/precision; a relative of accuracy)
results = model.evals_result()
# print("eval's result : ", results)
y_pred = model.predict(x_test)
r2 = r2_score(y_test, y_pred)
print("R2 : ", r2) | [
"92.seoonooo@gmail.com"
] | 92.seoonooo@gmail.com |
3cd0488b6b634aac8022d5257434a461105d2364 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02578/s995045217.py | d1f71aa2eceeb57ffbd4b83ec3aa667c45437a0b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | from sys import stdin
from math import ceil
# One-line stdin reader (NOTE: ceil is imported but never used).
inp = lambda : stdin.readline().strip()
n = int(inp())
a = [int(x) for x in inp().split()]
curr = 0
ans = 0
# Each element must be raised to the running maximum seen before it;
# ans accumulates the total number of unit increments required.
for i in a:
    curr = max(curr, i)
    if i < curr:
        ans += curr - i
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c4afa4d8b74bc42805d910469039f3a47e385759 | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /test/test_locality_api.py | 653a1d95e0e5dc251e9a10fcf65664d83719718b | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.locality_api import LocalityApi # noqa: E501
from dbpedia.rest import ApiException
class TestLocalityApi(unittest.TestCase):
    """LocalityApi unit test stubs"""
    def setUp(self):
        # A fresh generated API client per test.
        self.api = dbpedia.api.locality_api.LocalityApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_localitys_get(self):
        """Test case for localitys_get
        List all instances of Locality  # noqa: E501
        """
        # TODO: generated stub -- add assertions for self.api.localitys_get().
        pass
    def test_localitys_id_get(self):
        """Test case for localitys_id_get
        Get a single Locality by its id  # noqa: E501
        """
        # TODO: generated stub -- add assertions for self.api.localitys_id_get().
        pass
if __name__ == '__main__':
    unittest.main()
| [
"maxiosorio@gmail.com"
] | maxiosorio@gmail.com |
d248a55669dbb6f95e7048320573eff2922fcd85 | a8d86cad3f3cc6a977012d007d724bbaf02542f7 | /vendors/marvin/marvin/cloudstackAPI/addBaremetalPxePingServer.py | ad3ae5db84fb4c57e1b4ce248bdcc756014ab43e | [] | no_license | bopopescu/bigrobot | f8d971183119a1d59f21eb2fc08bbec9ee1d522b | 24dad9fb0044df5a473ce4244932431b03b75695 | refs/heads/master | 2022-11-20T04:55:58.470402 | 2015-03-31T18:14:39 | 2015-03-31T18:14:39 | 282,015,194 | 0 | 0 | null | 2020-07-23T17:29:53 | 2020-07-23T17:29:52 | null | UTF-8 | Python | false | false | 3,134 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add a baremetal ping pxe server"""
from baseCmd import *
from baseResponse import *
class addBaremetalPxePingServerCmd (baseCmd):
    """Request parameters for the addBaremetalPxePingServer API call
    (module docstring: "add a baremetal ping pxe server").

    NOTE: the bare triple-quoted strings inside __init__ are no-op literals
    used as per-field documentation by this generated-style wrapper.
    """
    typeInfo = {}
    def __init__(self):
        self.isAsync = "true"
        """Credentials to reach external pxe device"""
        """Required"""
        self.password = None
        self.typeInfo['password'] = 'string'
        """the Physical Network ID"""
        """Required"""
        self.physicalnetworkid = None
        self.typeInfo['physicalnetworkid'] = 'uuid'
        """Root directory on PING storage server"""
        """Required"""
        self.pingdir = None
        self.typeInfo['pingdir'] = 'string'
        """PING storage server ip"""
        """Required"""
        self.pingstorageserverip = None
        self.typeInfo['pingstorageserverip'] = 'string'
        """type of pxe device"""
        """Required"""
        self.pxeservertype = None
        self.typeInfo['pxeservertype'] = 'string'
        """Tftp root directory of PXE server"""
        """Required"""
        self.tftpdir = None
        self.typeInfo['tftpdir'] = 'string'
        """URL of the external pxe device"""
        """Required"""
        self.url = None
        self.typeInfo['url'] = 'string'
        """Credentials to reach external pxe device"""
        """Required"""
        self.username = None
        self.typeInfo['username'] = 'string'
        """Password of PING storage server"""
        self.pingcifspassword = None
        self.typeInfo['pingcifspassword'] = 'string'
        """Username of PING storage server"""
        self.pingcifsusername = None
        self.typeInfo['pingcifsusername'] = 'string'
        """Pod Id"""
        self.podid = None
        self.typeInfo['podid'] = 'uuid'
        # Names of the parameters above that the API requires to be set.
        self.required = ["password","physicalnetworkid","pingdir","pingstorageserverip","pxeservertype","tftpdir","url","username",]
class addBaremetalPxePingServerResponse (baseResponse):
    """Response fields returned by the addBaremetalPxePingServer API call.

    NOTE: the bare triple-quoted strings inside __init__ are no-op literals
    used as per-field documentation.
    """
    typeInfo = {}
    def __init__(self):
        """Root directory on PING storage server"""
        self.pingdir = None
        self.typeInfo['pingdir'] = 'string'
        """PING storage server ip"""
        self.pingstorageserverip = None
        self.typeInfo['pingstorageserverip'] = 'string'
        """Tftp root directory of PXE server"""
        self.tftpdir = None
        self.typeInfo['tftpdir'] = 'string'
| [
"vui.le@bigswitch.com"
] | vui.le@bigswitch.com |
2c41f55a2753fd378c6b955b81ea0dc108036626 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/924f6b53a63dede6e59f/snippet.py | 19dcb8b5bcbeee647a0675544e42b8c24949c89b | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 7,080 | py | #
# Extract files from Bare git-annex repositories without git-annex
# Supports version v6
#
# See internals: http://git-annex.branchable.com/internals/
#
# Modified: added non-bare repos, added tar file (of symlinks) output for use with archivemount
#
# TODO: improve output
# TODO: use cat-files instead of archive
# TODO: export to tar WITH relative links
#
# Emanuele Ruffaldi (C) 2016
import sys,argparse,os,subprocess
import md5,tarfile,cStringIO,hashlib,struct
def gitgetpathinfo(branch,path,recurse=False):
    """uses ls-tree to extract information about a path in the branch or in general tree-ish"""
    # -r makes ls-tree recurse into subtrees.  NOTE(review): in the
    # non-recursive case an empty "" argument is passed to git -- confirm
    # git tolerates it.
    if recurse:
        r = "-r"
    else:
        r = ""
    w = subprocess.check_output(["git", "ls-tree",r,branch,"--",path])
    # One entry per line; each is [meta, filename] split at the tab.
    return [pa.split("\t") for pa in w.split("\n") if pa != ""] # meta TAB filename ==> meta is: ?? SPACE type
def tarextraclink(content):
    """Return the symlink target of the first member of an in-memory tar.

    `content` is the raw bytes of a tar archive (as produced by
    `git archive` on an annexed file, whose single entry is a symlink).
    """
    # io.BytesIO works on Python 2 and 3 alike; the previous
    # cStringIO.StringIO only exists on Python 2.
    import io
    t = tarfile.open(mode="r", fileobj=io.BytesIO(content))
    try:
        return t.getmembers()[0].linkname
    finally:
        # BUG FIX: the archive handle was never closed.
        t.close()
def gitgetfile(branch,path):
    """uses archive for extracing the path. This is better than the git show solution because it deals with diff automatically. But does not work with symbolic links"""
    # NOTE(review): branch and path are interpolated into a shell pipeline
    # (shell=True) -- only safe for trusted branch/path values.
    xpath,n = os.path.split(path)
    xx = "git archive --format=tar --prefix= \"%s:%s\" \"%s\" | tar -xO \"%s\"" % (branch,xpath,n,n)
    return subprocess.check_output(xx,shell=True)
def gitgetfile_tar(branch, path):
    """Return the raw tar bytes of a single file exported by `git archive`.

    The tar form preserves symlinks (see gitgetfile's docstring: plain
    extraction does not).  Returns None when git fails -- e.g. the path is
    not in the tree -- or when git cannot be executed at all.
    """
    try:
        xpath, n = os.path.split(path)
        # NOTE(review): branch/path are interpolated into a shell command
        # (shell=True) -- only safe for trusted repository contents.
        xx = "git archive --format=tar --prefix= \"%s:%s\" \"%s\"" % (branch, xpath, n)
        return subprocess.check_output(xx, shell=True)
    except (subprocess.CalledProcessError, OSError):
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only a failing/missing git should
        # map to None.
        return None
def gitgetfile_show(branch,path):
    """retrieve path content: first getting the hash and then the content via git show"""
    # ls-tree meta is "<mode> <type> <hash>", so element [2] is the object
    # hash handed to `git show`.  Returns None unless exactly one entry matches.
    found = gitgetpathinfo(branch,path)
    if len(found) == 1:
        return subprocess.check_output(["git", "show",found[0][0].split(" ")[2]])
    else:
        return None
def annexgetremotes(useshow):
    """list of remotes AKA uuid.log"""
    # uuid.log on the git-annex branch maps each remote's UUID to its
    # description; either retrieval path returns its raw contents.
    if useshow:
        return gitgetfile_show("git-annex","uuid.log")
    else: # slow with bare
        return gitgetfile("git-annex","uuid.log")
#https://gist.github.com/giomasce/a7802bda1417521c5b30
def hashdirlower(key):
    """Return the two-level lowercase hash directory ("abc/def/") for *key*.

    git-annex uses this layout for bare repositories, the git-annex branch
    and special remotes: the first six hex digits of md5(key), split 3+3.
    """
    hasher = hashlib.md5()
    # .encode() keeps this working on Python 3, where md5 requires bytes;
    # annex keys are ASCII, so on Python 2 this is a no-op re-encoding.
    hasher.update(key.encode('utf-8'))
    digest = hasher.hexdigest()
    return "%s/%s/" % (digest[:3], digest[3:6])
#https://gist.github.com/giomasce/a7802bda1417521c5b30
def hashdirmixed(key):
    """Return the two-level mixed-alphabet hash directory for *key*.

    Non-bare git-annex repositories use this layout: the first little-endian
    32-bit word of md5(key) is split into four 5-bit groups (taken at 6-bit
    offsets), each mapped through a 32-character alphabet.
    """
    hasher = hashlib.md5()
    # .encode() keeps this working on Python 3, where md5 requires bytes.
    hasher.update(key.encode('utf-8'))
    digest = hasher.digest()
    first_word = struct.unpack('<I', digest[:4])[0]
    # range() instead of the Python-2-only xrange() so this runs on both.
    nums = [first_word >> (6 * x) & 31 for x in range(4)]
    letters = ["0123456789zqjxkmvwgpfZQJXKMVWGPF"[i] for i in nums]
    return "%s%s/%s%s/" % (letters[1], letters[0], letters[3], letters[2])
def annexwhereis_bare(key):
    """Return (location-log contents, object path) for *key* in a bare repo.

    Bare repositories use the lowercase md5 directory layout produced by
    hashdirlower() for both the location log and the object store.
    """
    subdir = hashdirlower(key)
    log = gitgetfile("git-annex", os.path.join(subdir, key + ".log"))
    return log, os.path.join("annex", "objects", subdir, key, key)
def annexwhereis(key):
    """Return (location-log contents, object path) for *key*, non-bare layout.

    Objects live under the mixed-alphabet layout from hashdirmixed(), while
    the location log on the git-annex branch always uses the lowercase one.
    """
    mixed = hashdirmixed(key)
    lower = hashdirlower(key)
    log = gitgetfile("git-annex", os.path.join(lower, key + ".log"))
    return log, os.path.join("annex", "objects", mixed, key, key)
def checkbare(args):
    """Classify the repository at args.annex.

    Returns (False, gitdir) for a normal checkout whose .git contains an
    annex, (True, gitdir) for a bare repository (top-level `annex`
    directory), or None when no annex can be found at all.
    """
    dotgit = os.path.join(args.annex, ".git")
    if os.path.isdir(dotgit):
        return (False, dotgit) if os.path.isdir(os.path.join(dotgit, "annex")) else None
    if os.path.isdir(os.path.join(args.annex, "annex")):
        return (True, args.annex)
    return None
def main():
parser = argparse.ArgumentParser(description='Retrieve file from git-annex, even barebone')
parser.add_argument('--annex', help="path to annex repository",default=".")
parser.add_argument('path', help="file to be looked at",nargs="*")
parser.add_argument('--all', help="list all",action="store_true")
parser.add_argument('--verbose', help="verbose dump",action="store_true")
parser.add_argument('--tar', help="produces a tar file with given path cotaining the symbolic links")
parser.add_argument('--abs',help="makes abs files",action="store_true")
args = parser.parse_args()
# check if bare repository
isbare = checkbare(args)
if isbare is None:
print "not a git-annex repisitory"
isbare,gitdir = isbare
print "isbare?",isbare,gitdir
if not isbare:
workdir = args.annex
else:
workdir = None
os.environ["GIT_DIR"] = gitdir
print "list annexes\n",annexgetremotes(useshow=False)
if args.tar:
ot = tarfile.open(args.tar,"w")
if args.all:
args.path = [x[1] for x in gitgetpathinfo("master","",recurse=True)]
for p in args.path:
# we cannot use
ww = gitgetfile_tar("master",p) # tarred 1 file
if ww is None:
print "not found",p
continue
link = tarextraclink(ww) # extract the link from the single file
if args.verbose:
print "aslink",link
#w = gitgetfile("master",p) -- not working using tar because it is a link
#ref = gitgetfile_show("master",p) -- not working in theory
ref = link
if ref == "":
print "not found",p
else:
key = os.path.split(ref)[1] # the link contains the annex key
if args.verbose:
print "key is",key
if isbare:
locations,path = annexwhereis_bare(key) # extract
else:
locations,path = annexwhereis(key)
path = os.path.join(gitdir,path)
if args.verbose:
print p,"located in\n",locations
if not os.path.isfile(path):
if not isbare:
if os.path.isfile(path+".map"):
mpath = os.path.join(workdir,open(path+".map","r").read().strip())
if os.path.isfile(mpath):
path = mpath
else:
print "mapped file not found",mpath," for ",path # or direct mode not supported
path = None
else:
print "non bare file not found",path # or direct mode not supported
path = None
else:
print "file not found",path # or direct mode not supported
path = None
if path is not None:
ss = os.stat(path)
print path,ss
ti = tarfile.TarInfo(p)
ti.size = 0 # zero for links: ss.st_size
ti.mode = ss.st_mode
ti.mtime = ss.st_mtime
ti.type = tarfile.SYMTYPE
ti.uid = ss.st_uid
ti.gid = ss.st_gid
if args.abs:
ti.linkname = os.path.abspath(path)
else:
ti.linkname = path
ot.addfile(ti)
if __name__ == '__main__':
main() | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
7ba87e118ff1a299de306a3d4f2f509d0f68a0ff | dfb3d0b9e5ed3b061a9dcc0a3605af2daa9c5ef2 | /mobile/urls.py | 4e8184edbf9a14432f32e664681d6d21116d70cd | [] | no_license | aishAMZK/shopping | acd7d3c9ace0df75cd90befcbd38f0b8bb86ff8b | 5f40dbe24854c0e8438005fc896120f6f9d295d5 | refs/heads/master | 2023-04-03T12:42:19.136978 | 2021-04-16T13:29:48 | 2021-04-16T13:29:48 | 358,573,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | """mobileproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from .views import Index, Login, Signup, logout
from .views import Cart
from .views import Checkout
# Routes for the shopping app; all views are class-based except `logout`.
# NOTE(review): 'login', 'logout', 'cart' and 'check-out' lack the trailing
# slash the other routes use -- confirm that is intentional.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', Index.as_view(), name='homepage'),
    path('signup/', Signup.as_view(), name='signup'),
    path('login', Login.as_view(), name='login'),
    path('logout', logout, name='logout'),
    path('cart', Cart.as_view(), name='cart'),
    path('check-out', Checkout.as_view(), name='checkout'),
]
| [
"aiswaryakrishna46@gmail.com"
] | aiswaryakrishna46@gmail.com |
c19e08ed04866b1573abf2e8286143f87b1a9b13 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5706278382862336_1/Python/ikostia/solution.py | 34c5e06e1566a0a1531a0ee3e58b53d70f3a967a | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #! /usr/bin/python
debug = False
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def solve(input_data):
    # NOTE: Python 2 semantics -- `den / d` and `num / 2` rely on integer
    # (floor) division; under Python 3 these become float divisions.
    # NOTE(review): the lambda parameter `d` below shadows the gcd variable.
    den, num = input_data
    d = gcd(den, num)
    den = den / d
    num = num / d
    # After reduction the denominator must be a power of two: bin(num)[3:]
    # drops '0b' and the leading 1, so any remaining '1' bit means failure.
    bnum = map(lambda d: d == '1', list(bin(num)[3:]))
    if any(bnum):
        return "impossible"
    res = 1
    # Count the doublings needed for the numerator to reach num/2 (<= 40 days).
    while den < num / 2 and res <= 40:
        res += 1
        den = den * 2
    if res > 40 and den < num / 2:
        return "impossible"
    return str(res)
def read_input():
    # Parses one "p/q" line into a pair of ints (Python 2: raw_input).
    # NOTE(review): the names are swapped -- `den` receives the numerator
    # and `num` the denominator; solve() uses them consistently with this.
    s = raw_input()
    den, num = map(int, s.split("/"))
    return (den, num)
def main():
    # Standard Code Jam driver: T test cases, one "Case #i: answer" each.
    # (Python 2: print statement, raw_input, xrange.)
    T = int(raw_input())
    for t in xrange(T):
        print "Case #%d: %s" % (t + 1, solve(read_input()))
if __name__ == "__main__":
    main()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
b6c72913fc978ae4001e780fa3bed14b6dcf0015 | ca776f549e4103f3ec8bc1d78ea9ddc98eee9cb4 | /siconos/Local/SpheresPyramid/params.py | 5663ef4012af905bc8c36b3ac0beb18c80fdeaba | [] | no_license | FrictionalContactLibrary/fclib-scripts | d4a0f63333bcfa77e1ca5eaed6be587400a82b47 | dbef104353f3d196273ac99ad9ca0b73dc346fcb | refs/heads/master | 2022-05-25T20:12:17.310343 | 2022-05-13T14:15:38 | 2022-05-13T14:15:38 | 77,398,419 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import Siconos.Numerics as Numerics
# Time-stepping and contact parameters for the SpheresPyramid simulation,
# consumed by the accompanying Siconos driver.
t0 = 0
T = 30
h = 0.0005
g = 9.81
theta = 0.50001
mu = 2.0
dump_itermax = 80
dump_probability = .05
itermax = 100000
NewtonMaxIter = 20
tolerance = 1e-8
solver = Numerics.SICONOS_FRICTION_3D_NSGS
multipointIterations = False
import imp
import sys
try:
    imp.load_source('mkinput', 'mkinput.py')
except IOError:
    # BUG FIX: the old handler called warn() and usage(), neither of which
    # is defined or imported here, so a missing mkinput.py died with a
    # NameError instead of this message.
    sys.stderr.write('I need a mkinput.py file\n')
    sys.exit(1)
import mkinput
fileName = "SpheresPyramid{0}".format(mkinput.N)
# NOTE(review): unlike fileName, this template is never formatted here --
# confirm whether a consumer fills in {0} or .format(mkinput.N) was intended.
title = "SpheresPyramid with {0} levels"
description = """
Spheres pyramid under gravity on the ground with Bullet collision detection
Moreau TimeStepping: h={0}, theta = {1}
One Step non smooth problem: {2}, maxiter={3}, tol={4}
""".format(h, theta, Numerics.idToName(solver),
           itermax,
           tolerance)
mathInfo = ""
# if we want a shuffled NonsmoothGaussSeidel
#def initialize(model):
#    model.simulation().oneStepNSProblem(0).numericsSolverOptions().iparam[9] = 1
| [
"vincent.acary@inria.fr"
] | vincent.acary@inria.fr |
da2f3447df25e8672a8c89d31eb740322ae1790f | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/xiaojian/forth_phase/Django./day08/code/note/mynote/admin.py | c52aded9e2cc4e58c61eeed3be026b54015fcc6a | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 188 | py | from django.contrib import admin
from . import models
from user import models as u_models
# Register your models here.
# Expose the user app's User model and this app's Note model in the admin.
admin.site.register(u_models.User)
admin.site.register(models.Note)
| [
"1149158963@qq.com"
] | 1149158963@qq.com |
b0a622e6c7ab5d8810032327c012c41f075dbbbc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/348/92907/submittedfiles/principal.py | 8674f107067ffc4d8d05766f3d3d2689e85555f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Reads 50 grades (Portuguese prompts), then prints the list and its average.
notas = []
for i in range(0,50,1):
    notas.append(float(input('digite a nota%d: ' % ( i+1))))
# Each grade contributes notas[i]/50 to the mean.
media = 0
for i in range(0,50,1):
    media += notas[i]/50.0
print(notas)
print(media)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ede12433d9359fd4d0bd72ba7be09ba5b9ad0180 | 71e539273a80f943b0e2164228fe9e5288c90e62 | /Python/KidsWithTheGreatestNumberOfCandies.py | 253eb8e1802aacb6926340c9ff5929c5b0c98a8c | [] | no_license | abhi10010/LeetCode-Solutions | f67206052674585b57b93dae4cd9d68282b39bd6 | 01993de9f431dff487787709af8556f476e6b20b | refs/heads/master | 2022-11-09T20:07:27.689429 | 2020-06-25T13:37:19 | 2020-06-25T13:37:19 | 271,465,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | class Solution:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
res = list()
m = max(candies)
for i in candies:
if i + extraCandies >= m:
res.append(True)
else:
res.append(False)
return res
| [
"noreply@github.com"
] | abhi10010.noreply@github.com |
0c82c12388765452f1cf2aab6bd56e2b66ed9de6 | f9f4a41b8274e64a07eef099d937f60715f35d83 | /4_matrix/vec.py | 5ec3ec510e269b32c21f76b614ecad11c157a927 | [] | no_license | tsh/linear-algebra-coding-the-matrix | ae178f177650e56e623c8a6c7f8dda6c972e276d | d9f33165161009d1417c15eccce8ad1196c3248b | refs/heads/master | 2021-01-21T13:04:41.687831 | 2018-02-25T17:53:50 | 2018-02-25T17:53:50 | 55,892,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,040 | py | # version code 24ea27739109+
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
# Copyright 2013 Philip N. Klein
def getitem(v,k):
    """
    Return the value of entry k in v.
    Be sure getitem(v,k) returns 0 if k is not represented in v.f.
    >>> v = Vec({'a','b','c', 'd'},{'a':2,'c':1,'d':3})
    >>> v['d']
    3
    >>> v['b']
    0
    """
    # Domain check for consistency with the other operations (add, dot, ...)
    # which assert on domains; previously an out-of-domain label silently
    # returned None.
    assert k in v.D
    # Labels absent from the sparse representation are implicitly zero.
    return v.f.get(k, 0)
def setitem(v,k,val):
    """
    Set the element of v with label d to be val.
    setitem(v,d,val) should set the value for key d even if d
    is not previously represented in v.f.
    >>> v = Vec({'a', 'b', 'c'}, {'b':0})
    >>> v['b'] = 5
    >>> v['b']
    5
    >>> v['a'] = 1
    >>> v['a']
    1
    >>> v['a'] = 0
    >>> v['a']
    0
    """
    # Only labels in the domain may be assigned (consistent with getitem and
    # the domain assertions in add/dot); the sparse map is updated in place.
    assert k in v.D
    v.f[k] = val
def equal(u,v):
    """
    Return true iff u is equal to v.
    Because of sparse representation, it is not enough to compare dictionaries
    >>> Vec({'a', 'b', 'c'}, {'a':0}) == Vec({'a', 'b', 'c'}, {'b':0})
    True
    Be sure that equal(u, v) check equalities for all keys from u.f and v.f even if
    some keys in u.f do not exist in v.f (or vice versa)
    >>> Vec({'x','y','z'},{'y':1,'x':2}) == Vec({'x','y','z'},{'y':1,'z':0})
    False
    >>> Vec({'a','b','c'}, {'a':0,'c':1}) == Vec({'a','b','c'}, {'a':0,'c':1,'b':4})
    False
    >>> Vec({'a','b','c'}, {'a':0,'c':1,'b':4}) == Vec({'a','b','c'}, {'a':0,'c':1})
    False
    The keys matter:
    >>> Vec({'a','b'},{'a':1}) == Vec({'a','b'},{'b':1})
    False
    The values matter:
    >>> Vec({'a','b'},{'a':1}) == Vec({'a','b'},{'a':2})
    False
    """
    assert u.D == v.D
    # Compare entry-by-entry over the shared domain so implicit zeros in
    # either sparse representation are taken into account.
    return all(getitem(u, label) == getitem(v, label) for label in v.D)
def add(u,v):
    """
    Returns the sum of the two vectors.
    Make sure to add together values for all keys from u.f and v.f even if some keys in u.f do not
    exist in v.f (or vice versa)
    >>> a = Vec({'a','e','i','o','u'}, {'a':0,'e':1,'i':2})
    >>> b = Vec({'a','e','i','o','u'}, {'o':4,'u':7})
    >>> c = Vec({'a','e','i','o','u'}, {'a':0,'e':1,'i':2,'o':4,'u':7})
    >>> a + b == c
    True
    >>> a == Vec({'a','e','i','o','u'}, {'a':0,'e':1,'i':2})
    True
    >>> b == Vec({'a','e','i','o','u'}, {'o':4,'u':7})
    True
    >>> d = Vec({'x','y','z'}, {'x':2,'y':1})
    >>> e = Vec({'x','y','z'}, {'z':4,'y':-1})
    >>> f = Vec({'x','y','z'}, {'x':2,'y':0,'z':4})
    >>> d + e == f
    True
    >>> b + Vec({'a','e','i','o','u'}, {}) == b
    True
    """
    assert u.D == v.D
    # Entrywise sum over the whole domain; getitem supplies the implicit
    # zeros for labels missing from either sparse map.
    return Vec(u.D, {label: getitem(u, label) + getitem(v, label) for label in u.D})
def dot(u,v):
    """
    Returns the dot product of the two vectors.
    >>> u1 = Vec({'a','b'}, {'a':1, 'b':2})
    >>> u2 = Vec({'a','b'}, {'b':2, 'a':1})
    >>> u1*u2
    5
    >>> u1 == Vec({'a','b'}, {'a':1, 'b':2})
    True
    >>> u2 == Vec({'a','b'}, {'b':2, 'a':1})
    True
    >>> v1 = Vec({'p','q','r','s'}, {'p':2,'s':3,'q':-1,'r':0})
    >>> v2 = Vec({'p','q','r','s'}, {'p':-2,'r':5})
    >>> v1*v2
    -4
    >>> w1 = Vec({'a','b','c'}, {'a':2,'b':3,'c':4})
    >>> w2 = Vec({'a','b','c'}, {'a':12,'b':8,'c':6})
    >>> w1*w2
    72
    The pairwise products should not be collected in a set before summing
    because a set eliminates duplicates
    >>> v1 = Vec({1, 2}, {1 : 3, 2 : 6})
    >>> v2 = Vec({1, 2}, {1 : 2, 2 : 1})
    >>> v1 * v2
    12
    """
    assert u.D == v.D
    # Accumulate the pairwise products directly (never via a set -- see the
    # doctest above about duplicates).
    total = 0
    for label in v.D:
        total += getitem(v, label) * getitem(u, label)
    return total
def scalar_mul(v, alpha):
    """
    Returns the scalar-vector product alpha times v.
    >>> zero = Vec({'x','y','z','w'}, {})
    >>> u = Vec({'x','y','z','w'},{'x':1,'y':2,'z':3,'w':4})
    >>> 0*u == zero
    True
    >>> 1*u == u
    True
    >>> 0.5*u == Vec({'x','y','z','w'},{'x':0.5,'y':1,'z':1.5,'w':2})
    True
    >>> u == Vec({'x','y','z','w'},{'x':1,'y':2,'z':3,'w':4})
    True
    """
    # Build the scaled sparse map explicitly, then wrap it in a fresh Vec.
    scaled = {}
    for label in v.D:
        scaled[label] = alpha * getitem(v, label)
    return Vec(v.D, scaled)
def neg(v):
    """
    Returns the negation of a vector.
    >>> u = Vec({2,4,6,8},{2:1,4:2,6:3,8:4})
    >>> -u
    Vec({8, 2, 4, 6},{8: -4, 2: -1, 4: -2, 6: -3})
    >>> u == Vec({2,4,6,8},{2:1,4:2,6:3,8:4})
    True
    >>> -Vec({'a','b','c'}, {'a':1}) == Vec({'a','b','c'}, {'a':-1})
    True
    """
    # Entrywise additive inverse over the full domain.
    return Vec(v.D, {label: -getitem(v, label) for label in v.D})
###############################################################################################################################
class Vec:
    """
    A vector has two fields:
    D - the domain (a set)
    f - a dictionary mapping (some) domain elements to field elements
        elements of D not appearing in f are implicitly mapped to zero
    """
    def __init__(self, labels, function):
        self.D = labels
        self.f = function
    # Operator support is delegated to the module-level procedures above.
    __getitem__ = getitem
    __setitem__ = setitem
    __neg__ = neg
    __rmul__ = scalar_mul #if left arg of * is primitive, assume it's a scalar
    def __mul__(self,other):
        #If other is a vector, returns the dot product of self and other
        if isinstance(other, Vec):
            return dot(self,other)
        else:
            return NotImplemented # Will cause other.__rmul__(self) to be invoked
    def __truediv__(self,other): # Scalar division
        return (1/other)*self
    __add__ = add
    def __radd__(self, other):
        "Hack to allow sum(...) to work with vectors"
        # NOTE(review): implicitly returns None when other != 0 -- only the
        # integer 0 start value of sum(...) is expected here; confirm.
        if other == 0:
            return self
    def __sub__(a,b):
        "Returns a vector which is the difference of a and b."
        return a+(-b)
    __eq__ = equal
    def is_almost_zero(self):
        # Squared 2-norm compared against a fixed 1e-20 tolerance; any
        # non-int/float/complex entry makes the answer False.
        s = 0
        for x in self.f.values():
            if isinstance(x, int) or isinstance(x, float):
                s += x*x
            elif isinstance(x, complex):
                s += x*x.conjugate()
            else: return False
        return s < 1e-20
    def __str__(v):
        "pretty-printing"
        # Two aligned rows -- sorted labels over their formatted values --
        # separated by a dashed rule; numbers use %G with numdec digits.
        D_list = sorted(v.D, key=repr)
        numdec = 3
        wd = dict([(k,(1+max(len(str(k)), len('{0:.{1}G}'.format(v[k], numdec))))) if isinstance(v[k], int) or isinstance(v[k], float) else (k,(1+max(len(str(k)), len(str(v[k]))))) for k in D_list])
        s1 = ''.join(['{0:>{1}}'.format(str(k),wd[k]) for k in D_list])
        s2 = ''.join(['{0:>{1}.{2}G}'.format(v[k],wd[k],numdec) if isinstance(v[k], int) or isinstance(v[k], float) else '{0:>{1}}'.format(v[k], wd[k]) for k in D_list])
        return "\n" + s1 + "\n" + '-'*sum(wd.values()) +"\n" + s2
    def __hash__(self):
        "Here we pretend Vecs are immutable so we can form sets of them"
        # Zero entries are skipped so equal vectors with different sparse
        # representations hash alike.
        h = hash(frozenset(self.D))
        for k,v in sorted(self.f.items(), key = lambda x:repr(x[0])):
            if v != 0:
                h = hash((h, hash(v)))
        return h
    def __repr__(self):
        return "Vec(" + str(self.D) + "," + str(self.f) + ")"
    def copy(self):
        "Don't make a new copy of the domain D"
        return Vec(self.D, self.f.copy())
| [
"dr.tallin@gmail.com"
] | dr.tallin@gmail.com |
2db35a9389103e8fc0a087681249cddef6e0d20b | a893d00bae0c0fa7db1d42cd14c368033e1c3d3f | /9-5/练习9-14/die.py | 1f6c887e08f14484099ec666f0ad6ae9a2a306ba | [] | no_license | taozhenting/python_introductory | 71ac4b5fe4aa45a9008c9510c77e34e31226f849 | f88afa0b4232e7ba79b42c370f2266fde85e7462 | refs/heads/master | 2020-04-27T06:02:20.169314 | 2019-05-22T09:17:40 | 2019-05-22T09:17:40 | 174,096,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from random import randint
class Die():
    """A die with a configurable number of sides."""

    def __init__(self, sides=6):
        """Create a die; ``sides`` defaults to the usual 6."""
        self.sides = sides
        self.one = 1  # lowest face value

    def roll_die(self):
        """Roll the die once and print the face value."""
        face = randint(self.one, self.sides)
        print(face)

    def read_die(self, sides2):
        """Switch to a ``sides2``-sided die, then print a header and 10 rolls."""
        self.sides = sides2
        self.numbers = list(range(1, 11))
        print(str(self.sides) + "面骰子投掷10次:")
        for self.number in self.numbers:
            self.roll_die()
"taozt@ichile.com.cn"
] | taozt@ichile.com.cn |
159812a4b18ec101b40c8c31eb36200bb142dfce | 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | /diventi/landing/migrations/0056_auto_20190413_2004.py | a41e9da7e50da70d6a58ef7449d21c6cbc0ecd64 | [
"Apache-2.0"
] | permissive | flavoi/diventi | 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | c0b1efe2baa3ff816d6ee9a8e86623f297973ded | refs/heads/master | 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 | Apache-2.0 | 2023-02-08T01:03:17 | 2017-09-09T14:10:51 | Python | UTF-8 | Python | false | false | 777 | py | # Generated by Django 2.1.7 on 2019-04-13 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header). Relaxes two ForeignKeys on
    # the 'feature' model so that deleting the referenced Presentation/Section
    # sets the reference to NULL instead of cascading.

    dependencies = [
        ('landing', '0055_auto_20190413_2003'),
    ]

    operations = [
        migrations.AlterField(
            model_name='feature',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='profile_features', to='landing.Presentation'),
        ),
        migrations.AlterField(
            model_name='feature',
            name='section',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='section_features', to='landing.Section'),
        ),
    ]
| [
"flavius476@gmail.com"
] | flavius476@gmail.com |
class Solution(object):
    """Post-order traversal of an N-ary tree (children first, then the node)."""

    def postorder(self, root):
        """
        :type root: Node
        :rtype: List[int]
        """
        collected = []
        self.dfs(root, collected)
        return collected

    def dfs(self, node, res):
        """Append the post-order values of ``node``'s subtree onto ``res``.

        Iterative formulation: do a "root, then children right-to-left as
        popped" walk and reverse it, which yields post-order.
        """
        if not node:
            return
        stack, reverse_order = [node], []
        while stack:
            current = stack.pop()
            reverse_order.append(current.val)
            stack.extend(current.children)
        res.extend(reversed(reverse_order))
"wutuo123@yeah.net"
] | wutuo123@yeah.net |
6051807f71b0884aa6953e4016a2b4be5a0ece68 | 5dde149d7577425387940d22b6df3919a5b73061 | /Realestate3/Tagent3/models.py | 3808e3f2e33693e673056756ecf1e11ea76a3e0f | [] | no_license | Jagadishbommareddy/agent | cbed666381b1c017c679a4b1b30a838abb17739d | 925659ca1fb4d0138d367f2a4c5675b1473193cd | refs/heads/master | 2021-01-22T00:51:31.824816 | 2017-09-02T12:27:38 | 2017-09-02T12:27:38 | 102,194,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | from django.db import models
from .validators import*
class ContactInfo(models.Model):
    """Contact details (mobile/landline numbers and email)."""
    # NOTE(review): this is a concrete (non-abstract) model that Agent inherits
    # from via multi-table inheritance — confirm an abstract base wasn't intended.
    mobile_number = models.CharField(max_length=15,validators=[validate_mobile_no])
    phone_number = models.CharField(max_length=15,validators=[validate_phone_no])
    email_id = models.EmailField()
class Address(models.Model):
    """A postal address with an explicit auto-increment primary key."""
    address_id= models.AutoField(primary_key=True)
    address1 = models.CharField(max_length=100)
    address2 = models.CharField(max_length=10)  # NOTE(review): only 10 chars — confirm intended
    city = models.CharField(max_length=20,validators=[validate_city])
    state= models.CharField(max_length=20,validators=[validate_state])
    landmark= models.CharField(max_length=20,validators=[validate_landmark])
    pincode= models.IntegerField()
class AgentReferals(models.Model):
    """A referral record that can be linked to agents."""
    referal_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=20,validators=[validate_name])
    # NOTE(review): BinaryField stores raw bytes; a True/False flag would
    # normally be a BooleanField — confirm the intended column type.
    verified = models.BinaryField(default=True)
class Media(models.Model):
    """An uploaded file (stored under documents/) with a display name."""
    media_id =models.AutoField(primary_key=True)
    media_name= models.CharField(max_length=50,validators=[validate_media_name])
    media_path= models.FileField(upload_to='documents/')
class Location(models.Model):
    """A named location an agent can be associated with."""
    loc_name = models.CharField(max_length=20,validators=[validate_loc_name])
class PropertyType(models.Model):
    """A category of property, described free-form."""
    property_type_id = models.AutoField(primary_key=True)
    description = models.CharField(max_length=200)
class Agent(ContactInfo,Media):
    """Real-estate agent profile.

    NOTE(review): inherits from TWO concrete models (ContactInfo and Media),
    i.e. Django multi-table inheritance with two parents — confirm this schema
    is intended (abstract base classes are the usual pattern here).
    """
    agent_id= models.AutoField(primary_key=True)
    first_name= models.CharField(max_length=20,validators=[validate_first_name])
    last_name= models.CharField(max_length=20,validators=[validate_last_name])
    age=models.IntegerField()
    education= models.CharField(max_length=50,validators=[validate_education])
    company_name=models.CharField(max_length=50)
    specialization= models.CharField(max_length=100,validators=[validate_specelization])
    experence=models.IntegerField()  # NOTE(review): likely a typo for 'experience'; renaming requires a migration
    agent_notes=models.TextField()
    # Many-to-many links to related reference data.
    address = models.ManyToManyField("Address")
    agentreferal = models.ManyToManyField("AgentReferals")
    location = models.ManyToManyField("Location")
    propertytype = models.ManyToManyField("PropertyType")
| [
"noreply@github.com"
] | Jagadishbommareddy.noreply@github.com |
178278173909f8e03f01544245087820b88c205d | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/util/datastruct/CaseInsensitiveDuplicateStringComparator.pyi | 5394dd55f716c89f9810c9391ef745871fb4bc39 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,755 | pyi | import java.lang
import java.util
import java.util.function
class CaseInsensitiveDuplicateStringComparator(object, java.util.Comparator):
    """
    Comparator for sorting Strings in a case insensitive way except that case insensitive duplicates
    are then sub-sorted by reverse case so that lower case is before upper case.
    Example: the strings "abc", "bob", "Bob", "zzz" would always sort as shown. In a normal case
    insensitive sort, the "bob" and "Bob" order would be arbitrary.
    """
    # Auto-generated Ghidra/Jython type stub: bodies are '...' placeholders and
    # the 'long'/'unicode' annotations come from the Jython 2.x runtime, not
    # CPython 3 — this file is for IDE type checking only, not for execution.

    def __init__(self): ...

    @overload
    def compare(self, name1: unicode, name2: unicode) -> int: ...

    @overload
    def compare(self, __a0: object, __a1: object) -> int: ...

    @overload
    @staticmethod
    def comparing(__a0: java.util.function.Function) -> java.util.Comparator: ...

    @overload
    @staticmethod
    def comparing(__a0: java.util.function.Function, __a1: java.util.Comparator) -> java.util.Comparator: ...

    @staticmethod
    def comparingDouble(__a0: java.util.function.ToDoubleFunction) -> java.util.Comparator: ...

    @staticmethod
    def comparingInt(__a0: java.util.function.ToIntFunction) -> java.util.Comparator: ...

    @staticmethod
    def comparingLong(__a0: java.util.function.ToLongFunction) -> java.util.Comparator: ...

    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def hashCode(self) -> int: ...

    @staticmethod
    def naturalOrder() -> java.util.Comparator: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    @staticmethod
    def nullsFirst(__a0: java.util.Comparator) -> java.util.Comparator: ...

    @staticmethod
    def nullsLast(__a0: java.util.Comparator) -> java.util.Comparator: ...

    @staticmethod
    def reverseOrder() -> java.util.Comparator: ...

    def reversed(self) -> java.util.Comparator: ...

    @overload
    def thenComparing(self, __a0: java.util.Comparator) -> java.util.Comparator: ...

    @overload
    def thenComparing(self, __a0: java.util.function.Function) -> java.util.Comparator: ...

    @overload
    def thenComparing(self, __a0: java.util.function.Function, __a1: java.util.Comparator) -> java.util.Comparator: ...

    def thenComparingDouble(self, __a0: java.util.function.ToDoubleFunction) -> java.util.Comparator: ...

    def thenComparingInt(self, __a0: java.util.function.ToIntFunction) -> java.util.Comparator: ...

    def thenComparingLong(self, __a0: java.util.function.ToLongFunction) -> java.util.Comparator: ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
959e579dcc1e44ee49159768722886931a44293a | 67e0e33535229f4e9e520baa9d4ca4db8ce88c10 | /BioClients/ncbo/Client.py | a0abd282522771a9c035503a34ac40c3d8cf6a6e | [
"CC0-1.0"
] | permissive | Huan-Yang/BioClients | 70a89d2067cbc9ab89b241f94c72a90a313927e4 | 7acae54548cf4d14f0a64a8503308934362da1a8 | refs/heads/master | 2023-07-01T18:09:03.993919 | 2021-08-06T00:00:49 | 2021-08-06T00:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,375 | py | #!/usr/bin/env python3
"""
http://data.bioontology.org/documentation
The National Center for Biomedical Ontology was founded as one of the
National Centers for Biomedical Computing, supported by the NHGRI, the
NHLBI, and the NIH Common Fund.
"""
###
import sys,os,argparse,re,yaml,logging,time
#
from .. import ncbo
from ..util import yaml as util_yaml
#
#############################################################################
if __name__=='__main__':
  EPILOG="""The National Center for Biomedical Ontology was founded as one of the National Centers for Biomedical Computing, supported by the NHGRI, the NHLBI, and the NIH Common Fund."""
  parser = argparse.ArgumentParser(description='NCBO REST API client utility', epilog=EPILOG)
  OPS = ['recommendOntologies']
  parser.add_argument("op", choices=OPS, help="OPERATION")
  parser.add_argument("--i", dest="ifile", help="input texts")
  parser.add_argument("--o", dest="ofile", help="output (TSV)")
  parser.add_argument("--text", help="input text")
  parser.add_argument("--api_host", default=ncbo.API_HOST)
  parser.add_argument("--api_base_path", default=ncbo.API_BASE_PATH)
  parser.add_argument("--param_file", default=os.environ["HOME"]+"/.ncbo.yaml")
  parser.add_argument("--api_key", help="API key")
  parser.add_argument("-v", "--verbose", default=0, action="count")
  args = parser.parse_args()
  # -vv (or more) enables DEBUG logging; otherwise INFO.
  logging.basicConfig(format="%(levelname)s:%(message)s", level=(logging.DEBUG if args.verbose>1 else logging.INFO))
  base_url = "https://"+args.api_host+args.api_base_path
  # Output goes to the named file, or stdout when --o is omitted.
  fout = open(args.ofile, "w+") if args.ofile else sys.stdout
  # Credentials come from the YAML param file; an explicit --api_key overrides it.
  params = util_yaml.ReadParamFile(args.param_file)
  if args.api_key: params["API_KEY"] = args.api_key
  if not params["API_KEY"]:
    parser.error("Please specify valid API_KEY via --api_key or --param_file")
  # Input texts: one per line from --i, or a single --text string.
  texts=[];
  if args.ifile:
    with open(args.ifile) as fin:
      while True:
        line = fin.readline()
        if not line: break
        texts.append(line.rstrip())
    logging.info(f"input texts: {len(texts)}")
  elif args.text:
    texts = [args.text]
  t0 = time.time()
  if args.op == "recommendOntologies":
    ncbo.Utils.RecommendOntologies(base_url, params["API_KEY"], texts, fout)
  else:
    parser.error(f"Invalid operation: {args.op}")
  logging.info(("Elapsed time: %s"%(time.strftime('%Hh:%Mm:%Ss',time.gmtime(time.time()-t0)))))
| [
"jeremyjyang@gmail.com"
] | jeremyjyang@gmail.com |
n = int(input())  # element count from the first line; not used beyond consuming the line
a = map(int, input().split())
count = 0
# For each x, walk y = x, x-1, ... downward and count every y satisfying
# y % 2 == 0 or y % 3 == 2, stopping at the first y where neither holds.
for x in a:
    for y in range(x, 0, -1):
        if y % 2 == 0 or y % 3 == 2:
            count += 1
        else:
            break
print(count)
"ritz@freex.ltd"
] | ritz@freex.ltd |
# This file is provided for backwards compatibility only
# In new jobOptions please use the files included below directly
# (Athena jobOptions: include() splices the referenced jobOptions file in place.)
include ("InDetJiveXML/InDetJiveXML_DataTypes.py")
include ("TrkJiveXML/TrkJiveXML_DataTypes.py")
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
b0db655fc20d73b8a745c3ee207872f9fb565c98 | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/policy/v4_0/models/policy_evaluation_record.py | 86735338fb6201019813c2f3de4b493e5990f4e2 | [] | no_license | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class PolicyEvaluationRecord(Model):
    """PolicyEvaluationRecord.

    :param _links:
    :type _links: :class:`ReferenceLinks <policy.v4_0.models.ReferenceLinks>`
    :param artifact_id:
    :type artifact_id: str
    :param completed_date:
    :type completed_date: datetime
    :param configuration:
    :type configuration: :class:`PolicyConfiguration <policy.v4_0.models.PolicyConfiguration>`
    :param context:
    :type context: :class:`object <policy.v4_0.models.object>`
    :param evaluation_id:
    :type evaluation_id: str
    :param started_date:
    :type started_date: datetime
    :param status:
    :type status: object
    """

    # msrest serialization map: Python attribute -> wire key + serialized type
    # (generated code — see file header; do not hand-edit).
    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'artifact_id': {'key': 'artifactId', 'type': 'str'},
        'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
        'configuration': {'key': 'configuration', 'type': 'PolicyConfiguration'},
        'context': {'key': 'context', 'type': 'object'},
        'evaluation_id': {'key': 'evaluationId', 'type': 'str'},
        'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
        'status': {'key': 'status', 'type': 'object'}
    }

    def __init__(self, _links=None, artifact_id=None, completed_date=None, configuration=None, context=None, evaluation_id=None, started_date=None, status=None):
        super(PolicyEvaluationRecord, self).__init__()
        self._links = _links
        self.artifact_id = artifact_id
        self.completed_date = completed_date
        self.configuration = configuration
        self.context = context
        self.evaluation_id = evaluation_id
        self.started_date = started_date
        self.status = status
| [
"usama.blavins1@gmail.com"
] | usama.blavins1@gmail.com |
class Solution:
    """Find grid cells from which water can reach both oceans.

    The map is a list of equal-length strings: '~' marks one ocean's border,
    '*' the other's, and digit characters are terrain heights. Water flows
    from a cell to a neighbour of equal or lower height, so we search
    *uphill* from each coastline and intersect the two reachable sets.
    """

    def search(self, i, j, visited):
        """Iterative uphill flood-fill from (i, j); adds reached cells to visited."""
        stack = [(i, j)]
        while stack:
            x, y = stack.pop()
            # BUG FIX: the original tested `self.mat[x][y] in visited`, i.e. a
            # character against a set of (row, col) tuples — always False — so
            # duplicate stack entries were re-expanded redundantly.
            if x < 0 or x >= self.m or y < 0 or y >= self.n or (x, y) in visited:
                continue
            visited.add((x, y))
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                nx, ny = x + dx, y + dy
                if self.mat[nx][ny] in '~*':
                    continue  # border/ocean sentinel cells are never entered
                # Climb only to equal-or-higher terrain not yet visited.
                if self.mat[nx][ny] >= self.mat[x][y] and (nx, ny) not in visited:
                    stack.append((nx, ny))

    def flowingWater(self, mat):
        """Return the set of (row, col) cells that drain to both oceans."""
        self.m = len(mat)
        self.n = len(mat[0])
        self.mat = mat
        # Cells reachable uphill from the '~' coastline (row 1 / column 1).
        visited_pac = set()
        for i in range(1, self.m-1):
            self.search(i, 1, visited_pac)
        for j in range(1, self.n-1):
            self.search(1, j, visited_pac)
        # Cells reachable uphill from the '*' coastline (row m-2 / column n-2).
        visited_atl = set()
        for i in range(1, self.m-1):
            self.search(i, self.n-2, visited_atl)
        for j in range(1, self.n-1):
            self.search(self.m-2, j, visited_atl)
        return visited_pac & visited_atl
# Manual smoke test: run the solver on a sample height map and print the
# intersection of cells reachable from both coastlines (Python 2 print).
solution = Solution()
mountain = ['~~~~~~~', '~12235*', '~32344*', '~24531*', '~67145*', '~51124*', '*******']
# mountain = ['~~~~', '~25*', '~86*', '****']
print solution.flowingWater(mountain)
| [
"lizhuogo@gmail.com"
] | lizhuogo@gmail.com |
bdd9de3bd55fd46686b5c58557b70e1fc8644139 | 0fb0dba210ff0f63515c464d7acc95ae32d7603c | /Application/Automate the Process of Reporting the Install Date for ESET/automate-the-process-of-reporting-the-install-date-for-eset.py | 50234b12a83b445eea115ed4e289aae23e4e4005 | [] | no_license | slad99/pythonscripts | 7cbe6b8bb27c8c06e140c46e7c8cf286cbc56d8e | 4e0ebb023899a602cb041ef6f153fd3b7ab032e9 | refs/heads/master | 2022-01-04T21:49:10.486758 | 2019-06-28T14:29:28 | 2019-06-28T14:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,668 | py | import os,socket,_winreg,getpass
b=[]  # accumulator of formatted "name publisher Date:<yyyymmdd>" entries, filled by collectprograms()
print "USER NAME: "+getpass.getuser()
# Open a UDP "connection" to a public resolver purely to learn which local
# interface/IP the OS would route through; no packet is actually sent.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
print "IP-ADDRESS : "+(s.getsockname()[0])
from time import gmtime, strftime
time=strftime("%Y-%m-%d %H:%M:%S", gmtime())  # NOTE(review): shadows the name 'time'; later code imports time functions directly
port=587  # SMTP submission port — assigned but not used in the visible code
def computername():
    """Return this machine's name from the COMPUTERNAME environment variable (Windows)."""
    import os
    return os.environ["COMPUTERNAME"]
## get ip address
def ipaddress():
    """Resolve the local hostname to an IP address string (DNS-dependent)."""
    import socket
    hostname = socket.gethostname()
    return socket.gethostbyname(hostname)
def collectprograms(rtkey,pK,kA):
    """Walk a Windows 'Uninstall' registry key and collect installed programs.

    For every subkey exposing DisplayName/Publisher/InstallDate, appends a
    "\\n<name> <publisher> Date:<yyyymmdd>\\n" entry to the module-global list
    ``b`` and rebuilds the module-global string ``str2`` from it.
    Enumeration stops when EnumKey raises (index past the last subkey).
    """
    import _winreg
    import os
    list=[]  # NOTE(review): shadows the builtin and is never used
    oK=_winreg.OpenKey(rtkey,pK,0,kA)
    i=0
    while True:
        try:
            bkey=_winreg.EnumKey(oK,i)
            vkey=os.path.join(pK,bkey)
            oK1=_winreg.OpenKey(rtkey,vkey,0,kA)
            try:
                DN,bla=_winreg.QueryValueEx(oK1,'DisplayName')
                DV,bla=_winreg.QueryValueEx(oK1,'Publisher')
                DI,bla=_winreg.QueryValueEx(oK1,'InstallDate')
                inlist=[DN.strip(), DV.strip(),DI.strip()]
                # Skip entries whose publisher is the literal string "None".
                if inlist[1]=="None":
                    gh=0
                else:
                    ki="\n"+inlist[0]+" "+inlist[1]+" Date:"+inlist[2]+"\n"
                    b.append(ki)
                    global str2
                    str2 = ''.join(str(e) for e in b)
            except:
                # Subkey lacks one of the three values — ignore it.
                pass
            i+=1
        except:
            # EnumKey ran past the last subkey index — done.
            break
    _winreg.CloseKey(oK)
def programsinstalled():
    """Scan the Windows Uninstall registry hives for installed programs.

    On 64-bit Windows (detected via PROGRAMFILES(X86)) both the 32-bit and
    64-bit registry views are scanned, for the machine-wide and current-user
    hives. Results are accumulated by collectprograms() into the module
    globals ``b`` and ``str2`` — nothing is returned.
    """
    uninstallkey='SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
    if 'PROGRAMFILES(X86)' in os.environ.keys():
        rklist=[(_winreg.HKEY_LOCAL_MACHINE,uninstallkey,_winreg.KEY_WOW64_32KEY | _winreg.KEY_READ),
                (_winreg.HKEY_LOCAL_MACHINE,uninstallkey,_winreg.KEY_WOW64_64KEY | _winreg.KEY_READ),
                (_winreg.HKEY_CURRENT_USER,uninstallkey,_winreg.KEY_WOW64_32KEY | _winreg.KEY_READ),
                (_winreg.HKEY_CURRENT_USER,uninstallkey,_winreg.KEY_WOW64_64KEY | _winreg.KEY_READ)]
    else:
        rklist=[(_winreg.HKEY_LOCAL_MACHINE,uninstallkey,_winreg.KEY_READ),
                (_winreg.HKEY_CURRENT_USER,uninstallkey,_winreg.KEY_READ)]
    # Removed the unused 'collected'/'blacklisted' locals and the unused
    # 'col' binding: collectprograms() works purely by side effect.
    for rootkey, subkey, access in rklist:
        collectprograms(rootkey, subkey, access)
# Report the ESET install date(s) parsed out of the collected program list.
# BUG FIX: 're' is used below but was never imported anywhere in this module,
# so the original crashed with a NameError at this point.
import re

programsinstalled()
# str2 is the concatenation of "\n<name> <publisher> Date:<yyyymmdd>\n"
# entries built as a side effect of programsinstalled() -> collectprograms().
ki = re.findall('ESET(.*)', str2)
for i in ki:
    raw_date = re.findall('Date:(.*)', i)[0]      # the yyyymmdd part
    val = ('').join(re.findall('(.*)Date:', i))   # everything before "Date:"
    # Reformat yyyymmdd -> yyyy/mm/dd by inserting slashes.
    chars = list(raw_date)
    chars.insert(4, '/')
    chars.insert(7, '/')
    strre = ''.join(str(e) for e in chars)
    print('ESET' + val + 'Date: ' + strre)
| [
"noreply@github.com"
] | slad99.noreply@github.com |
9bf830b879b67b8bd3cde7fbd237f059e00381a7 | b1d941be5cd577ce34475339b021784aa9af6395 | /libcloudforensics/logging_utils.py | 747470010f535c75cd9b624fe30eda4db675d793 | [
"Apache-2.0"
] | permissive | google/cloud-forensics-utils | ef21ac682e040b5b977aa897aaf75b3b8ec1ed6d | 38926ef5d075696b2b0f6714f3758be1e6ea1658 | refs/heads/main | 2023-09-04T11:05:42.136161 | 2023-08-28T03:25:22 | 2023-08-28T03:25:22 | 238,205,900 | 418 | 95 | Apache-2.0 | 2023-09-14T05:55:03 | 2020-02-04T12:54:51 | Python | UTF-8 | Python | false | false | 4,407 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module providing custom logging formatters and colorization for ANSI
compatible terminals."""
import logging
import random
import sys
from typing import List
def _GenerateColorSequences() -> List[str]:
"""Generates ANSI codes for 256 colors.
Works on Linux and macOS, Windows (WSL) to be confirmed.
Returns:
List[str]: A list of ANSI codes.
"""
sequences = []
for i in range(0, 16):
for j in range(0, 16):
code = str(i * 16 + j)
seq = '\u001b[38;5;' + code + 'm'
sequences.append(seq)
return sequences
COLOR_SEQS = _GenerateColorSequences()
RESET_SEQ = '\u001b[0m'

# Cherrypick a few interesting values. We still want the whole list of colors
# so that modules have a good amount colors to chose from.
# pylint: disable=unbalanced-tuple-unpacking
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = COLOR_SEQS[8:16]
BG_RED = '\u001b[41m'  # Red background
BOLD = '\u001b[1m'  # Bold / bright modifier

# We'll get something like this:
# [2020-07-09 18:06:05,187] [libcloudforensics] INFO Disk successfully copied
# The {0:s}/{1:s} slots take BOLD/RESET and {color:s} the per-module color.
LOG_FORMAT = ('[%(asctime)s] [{0:s}{color:s}%(name)-20s{1:s}] %(levelname)-8s'
              ' %(message)s')

# Message color applied per log level by Formatter.format().
LEVEL_COLOR_MAP = {
    'WARNING': YELLOW,
    'INFO': WHITE,
    'DEBUG': BLUE,
    'CRITICAL': BOLD + BG_RED + WHITE,
    'ERROR': RED
}
class Formatter(logging.Formatter):
  """logging.Formatter that colorizes messages for ANSI-capable terminals."""

  def __init__(self,
               colorize: bool = True,
               random_color: bool = False,
               **kwargs: str) -> None:
    """Initializes the Formatter object.

    Args:
      colorize (bool): If True, output will be colorized.
      random_color (bool): If True, the module name is rendered in a color
          picked at random from COLOR_SEQS.
    """
    self.colorize = colorize
    if self.colorize:
      module_color = random.choice(COLOR_SEQS) if random_color else ''
      kwargs['fmt'] = LOG_FORMAT.format(BOLD, RESET_SEQ, color=module_color)
    else:
      kwargs['fmt'] = LOG_FORMAT.format('', '', color='')
    super().__init__(**kwargs)  # type: ignore

  def format(self, record: logging.LogRecord) -> str:
    """Colorizes the record's message per its level, then formats it.

    Args:
      record (logging.LogRecord): Native log record.

    Returns:
      str: The formatted message string.
    """
    if self.colorize:
      rendered = record.getMessage()
      level_seq = LEVEL_COLOR_MAP.get(record.levelname)
      if level_seq:
        rendered = level_seq + rendered + RESET_SEQ
      # Replace msg with the fully merged (and possibly colorized) message.
      record.msg = rendered
    return super().format(record)
def SetUpLogger(name: str, no_newline: bool = False) -> None:
  """Configure a named logger, attaching a stdout handler only once.

  Args:
    name (str): The name for the logger.
    no_newline (bool): Optional. Whether or not to disable new lines in the
        logger's output. Defaults to False.
  """
  # Must be checked BEFORE getLogger(): getLogger() creates the entry in
  # loggerDict, so the membership test detects a brand-new logger.
  # pylint: disable=no-member
  is_new = name not in logging.root.manager.loggerDict  # type: ignore
  # pylint: enable=no-member
  logger = logging.getLogger(name)
  logger.setLevel(logging.INFO)
  if not is_new:
    return
  handler = logging.StreamHandler(sys.stdout)
  if no_newline:
    handler.terminator = ''
  handler.setFormatter(Formatter(random_color=True))
  logger.addHandler(handler)
def GetLogger(name: str) -> logging.Logger:
  """Thin wrapper around logging.getLogger.

  Lets sibling modules obtain a logger without importing the logging module
  and this module separately.

  Args:
    name (str): The name for the logger.

  Returns:
    logging.Logger: The named logger.
  """
  return logging.getLogger(name)
| [
"noreply@github.com"
] | google.noreply@github.com |
6557dcc3b585514a79943390d6b8f3ae2e3c9ff6 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/zodiac/test_config_flow.py | 18a512e0b455d0198ea9328ba4555bfa0d4e60d1 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 2,119 | py | """Tests for the Zodiac config flow."""
from unittest.mock import patch
import pytest
from homeassistant.components.zodiac.const import DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
from tests.common import MockConfigEntry
async def test_full_user_flow(hass: HomeAssistant) -> None:
    """Test the full user configuration flow."""
    # Step 1: starting the flow from the UI must show the (empty) user form.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )

    assert result.get("type") == FlowResultType.FORM
    assert result.get("step_id") == "user"

    # Step 2: submitting the form creates the entry; setup is patched out so
    # no real integration setup runs during the test.
    with patch(
        "homeassistant.components.zodiac.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            user_input={},
        )

    assert result.get("type") == FlowResultType.CREATE_ENTRY
    assert result.get("title") == "Zodiac"
    assert result.get("data") == {}
    assert result.get("options") == {}

    assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize("source", [SOURCE_USER, SOURCE_IMPORT])
async def test_single_instance_allowed(
    hass: HomeAssistant,
    source: str,
) -> None:
    """Test we abort if already setup."""
    # Pre-register an existing entry; any new flow (user or import) must abort.
    mock_config_entry = MockConfigEntry(domain=DOMAIN)
    mock_config_entry.add_to_hass(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": source}
    )

    assert result.get("type") == FlowResultType.ABORT
    assert result.get("reason") == "single_instance_allowed"
async def test_import_flow(
    hass: HomeAssistant,
) -> None:
    """Test the import configuration flow."""
    # Importing from YAML configuration creates the entry directly (no form).
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_IMPORT},
        data={},
    )

    assert result.get("type") == FlowResultType.CREATE_ENTRY
    assert result.get("title") == "Zodiac"
    assert result.get("data") == {}
    assert result.get("options") == {}
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
8062d69ba2538832e60059ddae623206f877de02 | 81b3efefa7ec376eacfc5c28e4e0b2b8e8fa8a80 | /net/sftpipe | e0ac8722d2cf03fe0b5d181a4bd033cc7974d499 | [
"MIT"
] | permissive | akhuettel/code | 833be8af9615ce3a5519bb803813b30db1f4a230 | 0cc56df9bcef93d19090e82fa7d12b4212123d8e | refs/heads/master | 2023-08-31T06:00:18.154407 | 2021-10-22T05:07:11 | 2021-10-22T05:07:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | #!/usr/bin/env python3
# Copy data from stdin to a remote file over SFTP. To be used if the target
# server doesn't have a POSIX-like shell interface and cannot reliably use 'cat
# > quoted_path'.
import argparse
import paramiko
import re
import sys
import subprocess
def parse_dest(arg):
    """Parse an scp-style destination "[user@]host:path" into (user, host, path).

    Returns:
        (user, host, path) — user is None when no "user@" prefix is present.

    Raises:
        ValueError: for sftp:// URLs (unsupported) or unparsable input.
    """
    if arg.startswith("sftp://"):
        raise ValueError("parsing sftp:// URLs not supported yet")
    # BUG FIX: the original matched against the module-global `args.dest`
    # instead of the `arg` parameter, so the function only worked by accident
    # when called with that exact global value.
    m = re.match(r"^([^:]+):(.+)$", arg)
    if not m:
        raise ValueError(f"could not parse {arg!r}")
    host, path = m.groups()
    um = re.match(r"^(.+)@([^@]+)$", host)
    if um:
        user, host = um.groups()
    else:
        user = None
    return user, host, path
class OpenSshSubsystemChannel():
    """
    A socket-like object to be used in place of paramiko.channel.Channel(), in
    order to use Paramiko SFTP client with OpenSSH host/user authentication.

    It spawns `ssh -q -s <endpoint> <subsystem>` and bridges send/recv to the
    child's stdin/stdout pipes.
    """

    def __init__(self, endpoint, subsystem):
        self.ep = endpoint
        self.subsys = subsystem
        self.sshcmd = ["ssh", "-q", "-s", endpoint, subsystem]
        self.proc = subprocess.Popen(self.sshcmd,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)

    def get_name(self):
        """Return a human-readable name for this pseudo-channel."""
        return f"[fake channel to {self.subsys!r} on {self.ep!r}]"

    def send(self, buf):
        """Write buf to the subsystem's stdin; returns the byte count written."""
        written = self.proc.stdin.write(buf)
        self.proc.stdin.flush()
        return written

    def recv(self, nbytes):
        """Read up to nbytes from the subsystem's stdout."""
        return self.proc.stdout.read(nbytes)

    def close(self):
        """Signal EOF to the subsystem and wait for the ssh child to exit."""
        self.proc.stdin.close()
        self.proc.wait()
parser = argparse.ArgumentParser()
parser.add_argument("dest")
args = parser.parse_args()
user, host, path = parse_dest(args.dest)
print(f"sftpipe: Connecting to {host!r}...", file=sys.stderr)
'''
client = paramiko.client.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.client.WarningPolicy)
client.connect(host,
username=user,
gss_kex=True,
gss_auth=True)
'''
ep = f"{user}@{host}" if user else f"{host}"
chan = OpenSshSubsystemChannel(ep, "sftp")
sftp = paramiko.sftp_client.SFTPClient(chan)
print(f"sftpipe: Uploading to {path!r}...", file=sys.stderr)
sftp.putfo(sys.stdin.buffer, path)
sftp.close()
| [
"grawity@gmail.com"
] | grawity@gmail.com | |
5a8e7108bea6fe1d2aa93e4c7a45b33778200cb6 | 998ced39bbacf743a445ae3f258d9a7215f10794 | /backend/menu/migrations/0001_initial.py | 1846814f922b9a90d09a1e8b8fc3c6674aacd07b | [] | no_license | crowdbotics-apps/test-19340 | d5483e2bbd889e187c3a0b145e40dafedef66af2 | ed7f208217b30631235fcbe61c33b06b189745a2 | refs/heads/master | 2023-07-06T02:51:14.025283 | 2020-08-03T17:19:10 | 2020-08-03T17:19:10 | 284,756,281 | 0 | 0 | null | 2021-08-03T20:03:13 | 2020-08-03T16:54:02 | JavaScript | UTF-8 | Python | false | false | 3,144 | py | # Generated by Django 2.2.15 on 2020-08-03 16:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the ``menu`` app.

    Creates Category, Country, Item, Review and ItemVariant. Avoid
    hand-editing: Django generated this file and later migrations in the
    app build on it.
    """
    initial = True
    # Review.profile FKs into delivery_user_profile, so that app's
    # initial migration must be applied first.
    dependencies = [
        ('delivery_user_profile', '0001_initial'),
    ]
    operations = [
        # Menu category with display assets (image + icon URLs).
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('image', models.URLField()),
                ('icon', models.URLField()),
            ],
        ),
        # Country record; `prefix` (max 8 chars) is presumably a dialling
        # or locale prefix -- confirm against the model definition.
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('prefix', models.CharField(max_length=8)),
                ('flag', models.URLField()),
            ],
        ),
        # Menu item; keeps its category on SET_NULL so deleting a
        # Category does not delete its items.
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('image', models.URLField()),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='item_category', to='menu.Category')),
            ],
        ),
        # User review of an Item: CASCADEs with the item, but survives
        # profile deletion (profile set to NULL).
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.FloatField()),
                ('review_text', models.TextField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_item', to='menu.Item')),
                ('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='review_profile', to='delivery_user_profile.Profile')),
            ],
        ),
        # Per-country priced variant of an Item; CASCADEs with both the
        # item and the country.
        migrations.CreateModel(
            name='ItemVariant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('price', models.FloatField()),
                ('image', models.URLField()),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_country', to='menu.Country')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_item', to='menu.Item')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.