blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff48941968979ef0668d6935a7cf5d692a04351b | dfeeb6f8a691c104898eee7b9ecefe8015d40f7c | /Pyhton tutorial /132Advanced_Python_Iterators_for_Dictionaries.py | 022a96ddd6c7fba038120681284d26abfe48c699 | [] | no_license | narendra-ism/Python_tutorial_basic_ | 9277926dbfc707a761abe2ddebafb0855249fb68 | 29c2ebd5e7095bfda02d8c03d0afb65a85efe05d | refs/heads/master | 2021-03-30T20:46:17.444715 | 2018-03-12T05:29:16 | 2018-03-12T05:29:16 | 124,831,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 11 18:02:08 2018
@author: narendra
"""
my_dict = {
'name': 'Nick',
'age': 31,
'occupation': 'Dentist',
}
for key in my_dict:
print(key, my_dict[key]) | [
"narendra11d@gmail.com"
] | narendra11d@gmail.com |
3f52a9fa1febbb8892a9673e2e7cb36cd16cbc1f | 6ad41d9b76360c8007b494616374e9e0474f4da8 | /mitogen/debug.py | 8cb1a3675469ddfbcbaf2b70875a73d0235d6d62 | [
"BSD-3-Clause"
] | permissive | danielcompton/mitogen | a1f46aec5766a1309a4a0fb89aac6fcb72d1ee89 | 2813d1a968d6f694514a0053d094c0da9ea4863b | refs/heads/master | 2021-04-17T20:48:30.103447 | 2018-03-25T09:13:20 | 2018-03-25T09:13:20 | 126,739,845 | 0 | 0 | BSD-3-Clause | 2018-03-25T21:00:54 | 2018-03-25T21:00:54 | null | UTF-8 | Python | false | false | 3,279 | py | # Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Basic signal handler for dumping thread stacks.
"""
import difflib
import os
import signal
import sys
import threading
import time
import traceback
_last = None
def format_stacks():
    """Return a single string describing the Python stack of every thread.

    Uses sys._current_frames() to snapshot all threads and maps thread IDs
    back to names via threading.enumerate().
    """
    name_by_id = {
        t.ident: t.name
        for t in threading.enumerate()
    }
    l = ['', '']
    for threadId, stack in sys._current_frames().items():
        l += ["# PID %d ThreadID: (%s) %s; %r" % (
            os.getpid(),
            name_by_id.get(threadId, '<no name>'),
            threadId,
            stack,
        )]
        # NOTE(review): presumably drops the two frames belonging to the
        # dump machinery itself (this function and its caller) — confirm.
        stack = stack.f_back.f_back
        for filename, lineno, name, line in traceback.extract_stack(stack):
            l += [
                'File: "%s", line %d, in %s' % (
                    filename,
                    lineno,
                    name
                )
            ]
            if line:
                l += [' ' + line.strip()]
        l += ['']
    l += ['', '']
    return '\n'.join(l)
def _handler(*_):
    """Signal handler: write all thread stacks to the controlling TTY.

    Also writes a unified diff against the previous dump so repeated
    signals show what changed between snapshots.
    """
    global _last
    s = format_stacks()
    # Line-buffered write to the controlling terminal (requires a TTY).
    fp = open('/dev/tty', 'w', 1)
    fp.write(s)
    if _last:
        fp.write('\n')
        diff = list(difflib.unified_diff(
            a=_last.splitlines(),
            b=s.splitlines(),
            fromfile='then',
            tofile='now'
        ))
        if diff:
            fp.write('\n'.join(diff) + '\n')
        else:
            fp.write('(no change since last time)\n')
    _last = s
def install_handler():
    """Install the stack-dump handler on SIGUSR2 (`kill -USR2 <pid>`)."""
    signal.signal(signal.SIGUSR2, _handler)
def _thread_main():
    """Write one stack dump to /tmp/stack.<pid>.log after a 7 second delay."""
    while True:
        time.sleep(7)
        text = format_stacks()
        # Bug fix: the file was opened in binary mode ('wb') but
        # format_stacks() returns str, which raises TypeError on write in
        # Python 3 — encode explicitly. `with` also closes the file instead
        # of leaking the handle.
        with open('/tmp/stack.%s.log' % (os.getpid(),), 'wb', 65535) as fp:
            fp.write(text.encode('utf-8'))
        break
def dump_periodically():
    """Start a background daemon thread that dumps thread stacks to /tmp."""
    # Bug fix: the original passed target=main, a name that does not exist
    # in this module and raised NameError; the worker is _thread_main.
    th = threading.Thread(target=_thread_main)
    th.daemon = True  # setDaemon() is deprecated; the attribute is equivalent
    th.start()
| [
"dw@botanicus.net"
] | dw@botanicus.net |
b6c563a3591bfe9763c33179bd4e387dea5f53bf | d73409535734a788af83a9b2b2e32dd1b979d5d2 | /proxySTAR_V3/certbot/acme/acme/jws.py | f9b81749ab1db42de1e3654eef04f98432698ff3 | [
"Apache-2.0",
"MIT"
] | permissive | mami-project/lurk | adff1fb86cb3e478fe1ded4cbafa6a1e0b93bfdd | 98c293251e9b1e9c9a4b02789486c5ddaf46ba3c | refs/heads/master | 2022-11-02T07:28:22.708152 | 2019-08-24T19:28:58 | 2019-08-24T19:28:58 | 88,050,138 | 2 | 2 | NOASSERTION | 2022-10-22T15:46:11 | 2017-04-12T12:38:33 | Python | UTF-8 | Python | false | false | 2,145 | py | """ACME-specific JWS.
The JWS implementation in acme.jose only implements the base JOSE standard. In
order to support the new header fields defined in ACME, this module defines some
ACME-specific classes that layer on top of acme.jose.
"""
from acme import jose
class Header(jose.Header):
    """ACME-specific JOSE Header. Implements nonce, kid, and url.
    """
    nonce = jose.Field('nonce', omitempty=True, encoder=jose.encode_b64jose)
    kid = jose.Field('kid', omitempty=True)
    url = jose.Field('url', omitempty=True)

    @nonce.decoder
    def nonce(value):  # pylint: disable=missing-docstring,no-self-argument
        # Re-raise base64url decode failures with a clearer message.
        try:
            return jose.decode_b64jose(value)
        except jose.DeserializationError as error:
            # TODO: custom error
            raise jose.DeserializationError("Invalid nonce: {0}".format(error))
class Signature(jose.Signature):
    """ACME-specific Signature. Uses ACME-specific Header for customer fields."""
    __slots__ = jose.Signature._orig_slots  # pylint: disable=no-member

    # TODO: decoder/encoder should accept cls? Otherwise, subclassing
    # JSONObjectWithFields is tricky...
    header_cls = Header
    header = jose.Field(
        'header', omitempty=True, default=header_cls(),
        decoder=header_cls.from_json)

# TODO: decoder should check that nonce is in the protected header
class JWS(jose.JWS):
    """ACME-specific JWS. Includes none, url, and kid in protected header."""
    signature_cls = Signature
    __slots__ = jose.JWS._orig_slots  # pylint: disable=no-member

    @classmethod
    # pylint: disable=arguments-differ,too-many-arguments
    def sign(cls, payload, key, alg, nonce, url=None, kid=None):
        """Sign `payload`, protecting the ACME header fields (nonce, url, kid)."""
        # Per ACME spec, jwk and kid are mutually exclusive, so only include a
        # jwk field if kid is not provided.
        include_jwk = kid is None
        return super(JWS, cls).sign(payload, key=key, alg=alg,
                                    protect=frozenset(['nonce', 'url', 'kid', 'jwk', 'alg']),
                                    nonce=nonce, url=url, kid=kid,
                                    include_jwk=include_jwk)
| [
"diego.deaguilarcanellas@telefonica.com"
] | diego.deaguilarcanellas@telefonica.com |
b598f0eb2b805c60cbd9476ff493934ffdf9de4e | b129c9b11e9d2c06114f45ce03a94f4f2a177119 | /hugin/haproxy/configuration.py | a0d3263efdc2cedcdfab838f248cfbc7c2003847 | [] | no_license | pyfidelity/hugin.haproxy | a9e48e345b03ed9d361c0d6c8617135378f5c311 | 444e30350936883e7749c2371f394fa82c1644fe | refs/heads/master | 2016-09-01T17:29:48.210244 | 2014-11-24T12:34:51 | 2014-11-24T12:34:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from ConfigParser import RawConfigParser
import re
from paste.util.multidict import MultiDict
class FilterConfig(RawConfigParser, object):
def __init__(self):
RawConfigParser.__init__(self, dict_type=MultiDict)
def urls(self):
output = self._dict()
for key, value in self._sections.items():
output[key] = value['method'], value['match']
return output
def _read(self, *args, **kwargs):
return_value = RawConfigParser._read(self, *args, **kwargs)
for key in self._sections.keys():
self._sections[key]['match'] = re.compile(self._sections[key]['match'])
return return_value | [
"git@matthewwilkes.name"
] | git@matthewwilkes.name |
a760406307209396651413a877d753828833f2df | b2ba88eb56e1f08b823a8865d69a69c395754011 | /PycharmProjects/PythonSeleniumAutoAugSep/12Oct2019/dictWithselenium.py | 6eb730986c1523b8f07b69242fef45e683ad0ad5 | [] | no_license | aravindanath/TeslaEV | 90d4577f4e2e2d0df9d5799acf9263895eb4a98c | a5a4b071da1187fec65f80481bf05a9469d38202 | refs/heads/master | 2020-12-06T12:07:52.074500 | 2020-03-18T15:25:21 | 2020-03-18T15:25:21 | 223,544,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | from selenium import webdriver
import time
# Data-driven browser launch: the `td` dict selects the URL and the browser.
td = {"url":"https://www.google.com/",'browser':"ff"}
if td['browser']=="chrome":
    driver = webdriver.Chrome("/Users/aravindanathdm/PycharmProjects/PythonSeleniumProject/driver/chromedriver")
elif td['browser']=="ff":
    driver = webdriver.Firefox(executable_path="/Users/aravindanathdm/PycharmProjects/PythonSeleniumProject/driver/geckodriver")
driver.get(td['url'])
# Crude wait so the page is visible before the script ends.
time.sleep(2)
# driver.quit() | [
"aravindanath86@gmail.com"
] | aravindanath86@gmail.com |
58018bce47bab170230d6e3048ec82dde7848ead | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005027.py | 27f26130e6844d40cbd135f283b24ee2697dcba2 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher65494(CommutativeMatcher):
    """Many-to-one matcher for a commutative Mul pattern.

    NOTE(review): this looks machine-generated (matchpy code generator used
    by SymPy's Rubi rules) — regenerate rather than editing by hand.
    """
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily create and reuse the singleton instance.
        if CommutativeMatcher65494._instance is None:
            CommutativeMatcher65494._instance = CommutativeMatcher65494()
        return CommutativeMatcher65494._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 65493
        # The `yield` after `return` makes this a generator that yields nothing.
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
ee8221f7212eb222c79b8dcfb88d513aad2ddf15 | adf4f40bc899775e4f87b40036d8b9ed8be7e847 | /chapter_03/exercises/greeting.py | 5e43278a36b84cd0b47ab44fdc81c5b049187846 | [] | no_license | ltsuda/python-crash-course | 7473ff150214fc7d7370fa7cebfd009d1a2746e7 | d153929229c071ce4733a68410220f621719983f | refs/heads/master | 2020-06-23T19:14:07.967109 | 2019-09-03T01:00:14 | 2019-09-03T01:12:53 | 198,728,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | names = ['chris', 'maka', 'leandro']
print(f'The first name in the list is {names[0]}')
print(f'The last name in the list is {names[-1]}')
| [
"leonardo.tsuda@icloud.com"
] | leonardo.tsuda@icloud.com |
4a77df5b8c2b09a7a235b60d8d7a36c558c4f1d0 | 1df82fa8ef888b74fb9095c9ade89e16895366b1 | /14.Lambdas and Buit In Functions - Exercise/03. Multiplication.py | c04346d3cb290fa33e44cdde67bdb53e43bbb9cc | [] | no_license | filipov73/python_advanced_january_2020 | 868eb4bc365f7774c373183760e7ac584e1bd20c | a5e24190ee08bd1a0534dc04f91a5ba1927d1b19 | refs/heads/master | 2020-11-26T14:07:12.423309 | 2020-02-23T15:20:13 | 2020-02-23T15:20:13 | 229,097,988 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py |
# Read a multiplier, then print every number on the next line scaled by it.
num = int(input())
list_ = [int(x) for x in input().split()]
result = map(lambda x: x * num, list_)
print(" ".join(map(str, result)))
| [
"m_filipov@yahoo.com"
] | m_filipov@yahoo.com |
86ff63b94e72ffb5a1207a33fb869f5c5fbe46f7 | 3bb8b4e9d9b3f38ec4ec8f049c2bb58fce5850ea | /setup.py | 352fcb364ae9a12ec1549006b19b704d46994a12 | [
"Apache-2.0"
] | permissive | klmitch/vapi | 8ae87d1c4032e1b5ae54b50b7bc09e18f3f4e8de | 3b8607d15723a6258ede96f607b32bb1ecf885be | refs/heads/master | 2021-01-21T23:03:45.998643 | 2014-09-24T23:02:34 | 2014-09-24T23:02:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | #!/usr/bin/env python
# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
# Work around the atexit/multiprocessing interaction described above.
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All project metadata lives in setup.cfg and is handled by pbr.
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True,
)
| [
"kevin.mitchell@rackspace.com"
] | kevin.mitchell@rackspace.com |
85492e471a02ef450052f0f42fe3d28ff42058fb | 7e69c60c23fce92463c78774b5968d3320c715c9 | /python_net/web_cgi/cgi-bin/botengine.py | 76b1f2b939236d3afc24d011cef7790c81f06c16 | [] | no_license | hwet-j/Python | 5128d114cf7257067f68cfb1db502e4f762ac8cc | 3e6f36be665932588a576f44ebb0107a4f350613 | refs/heads/master | 2023-04-08T17:52:31.607225 | 2021-04-17T05:25:02 | 2021-04-17T05:25:02 | 353,336,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,685 | py | import codecs
from bs4 import BeautifulSoup
import urllib.request
from konlpy.tag import Okt
import os, re, json, random
dict_file = "chatbot-data.json"
dic = {}
twitter = Okt()
# 딕셔너리에 단어 등록하기 ---
def register_dic(words):
global dic
if len(words) == 0: return
tmp = ["@"]
for i in words:
word = i[0]
if word == "" or word == "\r\n" or word == "\n": continue
tmp.append(word)
if len(tmp) < 3: continue
if len(tmp) > 3: tmp = tmp[1:]
set_word3(dic, tmp)
if word == "." or word == "?":
tmp = ["@"]
continue
# 딕셔너리가 변경될 때마다 저장하기
json.dump(dic, open(dict_file,"w", encoding="utf-8"))
# Record one trigram occurrence in the dictionary
def set_word3(dic, s3):
    """Count one occurrence of the (w1, w2, w3) trigram in the nested dict."""
    first, second, third = s3
    level_two = dic.setdefault(first, {})
    counts = level_two.setdefault(second, {})
    counts[third] = counts.get(third, 0) + 1
# Build a sentence ---
def make_sentence(head):
    """Generate a sentence starting from `head` via the trigram dictionary,
    then run it through Naver's spell checker to fix word spacing.

    Returns "" when `head` is unknown. Requires network access.
    """
    if not head in dic: return ""
    ret = []
    if head != "@":
        ret.append(head)
    top = dic[head]
    w1 = word_choice(top)
    w2 = word_choice(top[w1])
    ret.append(w1)
    ret.append(w2)
    while True:
        # Follow the chain while a continuation exists; stop at sentence end.
        if w1 in dic and w2 in dic[w1]:
            w3 = word_choice(dic[w1][w2])
        else:
            w3 = ""
        ret.append(w3)
        if w3 == "." or w3 == "? " or w3 == "":
            break
        w1, w2 = w2, w3
    ret = "".join(ret)
    # Fix spacing
    params = urllib.parse.urlencode({
        "_callback": "",
        "q": ret
    })
    # Uses Naver's spell checker API
    data = urllib.request.urlopen("https://m.search.naver.com/p/csearch/ocontent/spellchecker.nhn?" + params)
    data = data.read().decode("utf-8")[1:-2]
    data = json.loads(data)
    data = data["message"]["result"]["html"]
    data = soup = BeautifulSoup(data, "html.parser").getText()
    return data
def word_choice(sel):
    """Return a uniformly random key of the mapping `sel`."""
    candidates = list(sel)
    return random.choice(candidates)
# Build the chatbot's reply ---
def make_reply(text):
    """Learn from the incoming `text`, then answer with a generated sentence."""
    # Learn the words
    if not text[-1] in [".", "?"]: text += "."
    words = twitter.pos(text)
    register_dic(words)
    # If a word from the input already exists in the dictionary, seed
    # the generated sentence with it; otherwise start from "@".
    for word in words:
        face = word[0]
        if face in dic:
            return make_sentence(face)
    return make_sentence("@")
# 딕셔너리가 있다면 읽어 들이기
if os.path.exists(dict_file):
dic = json.load(open(dict_file, "r")) | [
"ghlckd5424@gmail.com"
] | ghlckd5424@gmail.com |
b62879c41444d90c6c81d3e6f4d4455793c8acc1 | 57106b3c8aab1f8a635806c8c15ffdde3f5d6fc2 | /22data-mining/frequent_patterns/main.py | 939e3a6744ea47a42464c91572bb4ab60bbe4c8c | [
"Apache-2.0"
] | permissive | CoryVegan/python-tutorial | 85e6b824d1f6a39b54d1fa84cd57def192f34e20 | a7c51593d779f0fc9751c2d6093f80878c4ba5c3 | refs/heads/master | 2020-03-22T03:22:43.774009 | 2018-06-28T02:52:14 | 2018-06-28T02:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,092 | py | # -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
import time
from eclat import eclat_zc
from freq_utils import loadDblpData, loadData, loadUnixData
from apriori import apriori_zc
from fp_growth import fp_growth
def test_fp_growth(minSup, dataSetDict, dataSet):
    """Run FP-growth and return (itemset, support) pairs sorted by support."""
    freqItems = fp_growth(dataSetDict, minSup)
    freqItems = sorted(freqItems.items(), key=lambda item: item[1])
    return freqItems
def test_apriori(minSup, dataSetDict, dataSet):
    """Run Apriori and return (itemset, support) pairs sorted by support."""
    freqItems = apriori_zc(dataSet, dataSetDict, minSup)
    freqItems = sorted(freqItems.items(), key=lambda item: item[1])
    return freqItems
def test_eclat(minSup, dataSetDict, dataSet):
    """Run Eclat and return (itemset, support) pairs sorted by support."""
    freqItems = eclat_zc(dataSet, minSup)
    freqItems = sorted(freqItems.items(), key=lambda item: item[1])
    return freqItems
def print_freqItems(logo, freqItems):
    """Print frequent itemsets, one per line, framed by labelled banners."""
    print("-------------------", logo, "---------------")
    for index, item in enumerate(freqItems):
        print(index, item)
    print(len(freqItems))
    print("-------------------", logo, " end ---------------")
def do_experiment_data_size():
    """Time fp-growth / eclat / apriori while shrinking the data size.

    Runs 5 rounds, reducing the transaction count by one fifth of the
    original each round. Returns (x_value, y_value): the data sizes and one
    timing series per algorithm (fp-growth, eclat, apriori).
    """
    data_name = 'unixData8_pro.txt'
    x_name = "Data_Size"
    data_num = 980
    # NOTE(review): true division makes `step` (and later data_num) a float
    # under Python 3 — confirm loadDblpData tolerates a float count.
    step = data_num / 5
    all_time = []
    x_value = []
    for k in range(5):
        minSup = data_num * 0.010  # minimum support = 1% of current size
        dataSetDict, dataSet = loadDblpData(("dataSet/" + data_name), ' ', data_num)
        x_value.append(data_num)
        if data_num < 0:
            break
        time_fp = 0
        time_et = 0
        time_ap = 0
        freqItems_fp = {}
        freqItems_eclat = {}
        freqItems_ap = {}
        # Accumulate wall-clock time for each algorithm over repeated runs.
        for i in range(2):
            ticks0 = time.time()
            freqItems_fp = test_fp_growth(minSup, dataSetDict, dataSet)
            time_fp += time.time() - ticks0
            ticks0 = time.time()
            freqItems_eclat = test_eclat(minSup, dataSetDict, dataSet)
            time_et += time.time() - ticks0
            ticks0 = time.time()
            freqItems_ap = test_apriori(minSup, dataSetDict, dataSet)
            time_ap += time.time() - ticks0
        print("minSup :", minSup, " data_num :", data_num, \
            " freqItems_fp:", " freqItems_eclat:", len(freqItems_eclat), " freqItems_ap:", len(
            freqItems_ap))
        # NOTE(review): the loop runs 2 iterations but totals are divided
        # by 10 — confirm whether that denominator is intentional.
        print("fp_growth:", time_fp / 10, " eclat:", time_et / 10, " apriori:", time_ap / 10)
        # print_freqItems("show", freqItems_eclat)
        data_num -= step
        use_time = [time_fp / 10, time_et / 10, time_ap / 10]
        all_time.append(use_time)
        # print use_time
    # Transpose all_time into one series per algorithm.
    y_value = []
    for i in range(len(all_time[0])):
        tmp = []
        for j in range(len(all_time)):
            tmp.append(all_time[j][i])
        y_value.append(tmp)
    return x_value, y_value
def do_experiment_min_support():
    """Time fp-growth / eclat / apriori while lowering the minimum support.

    Starts at data_num/6 and decreases by one fifth of that over 5 rounds.
    Returns (x_value, y_value): the support values and one timing series per
    algorithm (fp-growth, eclat, apriori).
    """
    data_name = 'unixData8_pro.txt'
    x_name = "Min_Support"
    data_num = 980
    minSup = data_num / 6
    dataSetDict, dataSet = loadDblpData(("dataSet/" + data_name), ',', data_num)
    step = minSup / 5
    all_time = []
    x_value = []
    for k in range(5):
        x_value.append(minSup)
        if minSup < 0:
            break
        time_fp = 0
        time_et = 0
        time_ap = 0
        freqItems_fp = {}
        freqItems_eclat = {}
        freqItems_ap = {}
        # Average each algorithm over 10 runs at this support level.
        for i in range(10):
            ticks0 = time.time()
            freqItems_fp = test_fp_growth(minSup, dataSetDict, dataSet)
            time_fp += time.time() - ticks0
            ticks0 = time.time()
            freqItems_eclat = test_eclat(minSup, dataSetDict, dataSet)
            time_et += time.time() - ticks0
            ticks0 = time.time()
            freqItems_ap = test_apriori(minSup, dataSetDict, dataSet)
            time_ap += time.time() - ticks0
        print("minSup :", minSup, " data_num :", data_num, \
            " freqItems_eclat:", len(freqItems_eclat))
        print("[time spend] fp_growth:", time_fp / 10, " eclat:", time_et / 10, " apriori:", time_ap / 10)
        # print_freqItems("show", freqItems_eclat)
        minSup -= step
        use_time = [time_fp / 10, time_et / 10, time_ap / 10]
        all_time.append(use_time)
        # print use_time
    # Transpose all_time into one series per algorithm.
    y_value = []
    for i in range(len(all_time[0])):
        tmp = []
        for j in range(len(all_time)):
            tmp.append(all_time[j][i])
        y_value.append(tmp)
    return x_value, y_value
def do_test():
    """Ad-hoc smoke test: run eclat on a small dataset and print the count."""
    dataSetDict, dataSet = loadDblpData(("dataSet/connectPro.txt"), ',', 100)
    minSup = 101
    # for item in freq_items:
    #     print item
    # freqItems = test_fp_growth(minSup, dataSetDict, dataSet)
    # print_freqItems("show", freqItems)
    #
    # NOTE(review): eclat runs twice with identical arguments and the first
    # result is discarded — confirm whether that is intentional.
    freqItems = test_eclat(minSup, dataSetDict, dataSet)
    # print_freqItems("show", freqItems)
    freqItems_eclat = test_eclat(minSup, dataSetDict, dataSet)
    # freqItems_ap = test_apriori(minSup, dataSetDict, dataSet)
    # print_freqItems("show", freqItems_ap)
    print(len(freqItems_eclat))
def do_dblp_data():
    """Run eclat over the full DBLP dataset, printing the time and itemsets."""
    data_name = 'dblpDataAll.txt'
    x_name = "Min_Support"
    data_num = 2715700
    minSup = 100
    dataSetDict, dataSet = loadDblpData(("dataSet/" + data_name), ',', data_num)
    time_fp = 0
    ticks0 = time.time()
    # NOTE(review): despite the `time_fp`/`freqItems_fp` names this times
    # eclat, not fp-growth.
    freqItems_fp = test_eclat(minSup, dataSetDict, dataSet)
    time_fp += time.time() - ticks0
    print(time_fp)
    for item in freqItems_fp:
        print(item)
if __name__ == '__main__':
    # Runs every experiment back to back; the (x_value, y_value) results of
    # the first call are overwritten by the second and never used here.
    x_value, y_value = do_experiment_min_support()
    x_value, y_value = do_experiment_data_size()
    do_test()
    do_dblp_data()
| [
"507153809@qq.com"
] | 507153809@qq.com |
84666edef17c1b5cba6573aa7211aaf13565b74d | d930697cc16f69187c0918524e655ab8259b9806 | /src/aux/parsing.py | 811773ccfb4605251941ca7c528dd8eb8454f7fc | [] | no_license | tkusmierczyk/badges2 | 7ff6c6edd8f21f90ec2981ede569c4a7c018a765 | 7738483c2a732a062007b14286ca2fce6684965a | refs/heads/master | 2021-09-22T09:51:08.296245 | 2018-09-07T15:59:33 | 2018-09-07T15:59:33 | 111,401,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
def str2value(s):
    """Best-effort conversion of a string to bool, int, or float.

    't'/'true' and 'f'/'false' (any case) become booleans; exact integer
    text becomes int; other numeric text becomes float; everything else is
    returned unchanged with its original casing.
    """
    original = s
    s = s.lower()
    if s == "t" or s == "true": return True
    if s == "f" or s == "false": return False
    try:
        # Only treat as int when the text is an exact integer literal.
        if int(s) == float(s): return int(s)
    except ValueError: pass  # narrowed from bare `except:` so real errors propagate
    try: return float(s)
    except ValueError: pass
    return original
def tcast(v):
    """Cast `v` to the narrowest of int, float, or str.

    int is chosen only when int(v) compares equal to `v` itself — so the
    string "3" becomes 3.0 (via float), while the float 3.0 becomes 3.
    """
    try:
        v2 = int(v)
        if v2 == v: return v2
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors propagate.
        pass
    try:
        return float(v)
    except (TypeError, ValueError):
        pass
    return str(v)
def parse_dictionary(options_str):
    """Parse "key=value" pairs (',' or ';' separated; ':' also means '=')."""
    normalized = options_str.replace(";", ",").replace(":", "=")
    parsed = {}
    for raw_entry in normalized.split(","):
        entry = raw_entry.strip()
        if not entry:
            continue
        if "=" not in entry:
            raise ValueError("options must be given as option=value")
        pieces = entry.split("=")
        parsed[pieces[0]] = str2value(pieces[1])
    return parsed
return options_dict
def format_dict(dct):
    """Render a dict as text without surrounding braces or quote characters."""
    text = str(dct).strip("{").strip("}")
    for quote in ('"', "'"):
        text = text.replace(quote, "")
    return text
class objectview(object):
    """Expose the keys of a dict as attributes of an object.

    A copy of the input dict becomes the instance __dict__, so mutating the
    original dict afterwards does not affect the view.
    """
    def __init__(self, d):
        # Bug fix: the original stored the copy as `self.d` *before* swapping
        # __dict__, so the `d` attribute was discarded along with the old
        # __dict__ and __str__ raised AttributeError unless the input dict
        # happened to contain a 'd' key.
        self.__dict__ = d.copy()
    def __str__(self):
        return str(self.__dict__)
"you@example.com"
] | you@example.com |
c68ef51105c5285f6a7602dbe1e424ed80366edb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03061/s017722263.py | daf6b30e11b69ac96f03b0ad16f9930e404f4415 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | import math
# Maximum gcd obtainable from the array when one element is excluded:
# prefix gcds L and suffix gcds R are combined around the skipped index.
n = int(input())
a = list(map(int,input().split()))
L = [0]*(n+1)  # L[i] = gcd(a[0..i-1]); gcd(0, x) == x seeds the scan
R = [0]*(n+1)  # R[i+1] = gcd(a[i+1..n-1]), filled from the right
for i in range(n):
    L[i+1] = math.gcd(L[i], a[i])
    R[-2-i] = math.gcd(R[-1-i], a[-1-i])
ans = 0
for i in range(n):
    # gcd of every element except a[i]
    G = math.gcd(L[i], R[i+1])
    ans = max(ans, G)
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
51070f08059bac4f36859b19228d0f0ac977d60c | a80b8d4276140c5d702a651ef1fd4540201ae8eb | /homeworks/hw7/views.py | 5b5e8c3e23ff6047fa17d906c1d693c89d25386f | [] | no_license | laky55555/application_security | cc15c7abf8e472634e37ea56fe1b0eb01b6ee060 | a81299f2dfbe93e5785720eb7ccb25b9c5c11b18 | refs/heads/master | 2021-01-23T02:22:51.536155 | 2017-08-19T16:12:02 | 2017-08-19T16:12:02 | 85,986,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,839 | py | from django.shortcuts import render
from social_django.admin import UserSocialAuth
import requests
# Create your views here.
def index(request):
    """Render the homework landing page listing the OAuth2 exercises."""
    page_title = "Seventh homework"
    content = "Using OAuth2 from popular social networks"# + '/home/ivan/Dropbox/Faks/5_godina/application_security/homeworks'
    problem1 = [('login', 'Create a simple server-side web application that uses OAuth2 from popular sites for login.')]
    problem2 = [('facebook', 'Using Facebook API'), ('twitter', 'Using Twitter API'), ('google', 'Using Google+ API'), ]
    problems = [problem1, problem2]
    return render(request, 'landing.html', {'page_title': page_title, 'content': content, 'problems': problems})
def login(request):
    """Show login links per OAuth2 provider plus the user's linked accounts."""
    page_title = "Login via social networks"
    providers = []
    if request.user.is_authenticated():
        # Provider names already linked to the logged-in account.
        providers = list(UserSocialAuth.objects.filter(user=request.user).values_list('provider', flat=True))
    explanation = [('Account linked to:', providers),
                   ('Facebook', ['<a href="/login/facebook?next=/hw7">Login via facebook</a>',
                                 'Specificy what permissions you want to get in settings: SOCIAL_AUTH_FACEBOOK_SCOPE = ["email", "user_photos"]']),
                   ('Google', ['<a href="/login/google-oauth2?next=/hw7">Login via google</a>']),
                   ('Twitter', ['<a href="/login/twitter?next=/hw7">Login via twitter [NOT IN YET IN USE]</a>']),
                   ('Slack', ['<a href="/login/slack?next=/hw7">Login via slack</a>',]),
                   ('Note', ['Add backends of API-s you want to use in backend tuple (setting.py)',
                             'Add keys (secret and public) of backed',
                             'Add pipelanes; info and specification of new users you want to get and import into database)',
                             'Using pipeline associate_by_email just with services that check email. If they don\'t check it -> <a href="http://python-social-auth.readthedocs.io/en/latest/configuration/django.html">security risk</a>.'])]
    return render(request, 'base.html', {'page_title': page_title, 'explanation': explanation})
def get_data_from_facebook(url, token):
    """Fetch one page of Graph API results; returns (data, next).

    NOTE(review): the real pagination cursor line is commented out and
    `next` is currently set to `data` itself, so callers never receive a
    follow-up URL — confirm whether pagination was deliberately disabled.
    """
    response = requests.get(url, params={'access_token': token}).json()
    data = response.get('data')
    # next = response.get('paging').get('next')
    next = data
    return (data, next)
def facebook(request):
    """Fetch posts / albums / taggable friends via the Facebook Graph API."""
    page_title = "Playing with Facebook API"
    providers = []
    friend_list = my_albums = latest_posts = False
    if request.user.is_authenticated():
        providers = list(UserSocialAuth.objects.filter(user=request.user).values_list('provider', flat=True))
        user = UserSocialAuth.objects.filter(provider='facebook', user=request.user).first()
        # Only act on a POST with a linked, non-expired token and a known action.
        if request.method == 'POST' and user and not user.access_token_expired() and request.POST.get('usage') in {'posts', 'albums', 'taggable_friends'}:
            usage = request.POST.get('usage')
            url = 'https://graph.facebook.com/v2.9/' + user.uid
            if usage == 'posts':
                latest_posts, next = get_data_from_facebook(url +'/feed?fields=picture,message,permalink_url,created_time', user.access_token)
            elif usage == 'albums':
                my_albums, next = get_data_from_facebook(url +'/albums?fields=count,link,name,photo_count,picture', user.access_token)
            elif usage == 'taggable_friends':
                friend_list, next = get_data_from_facebook(url + '/taggable_friends?fields=picture.width(300),name', user.access_token)
    return render(request, 'hw7/facebook.html', {'page_title': page_title, 'providers': providers, 'friend_list': friend_list, 'my_albums': my_albums, 'latest_posts': latest_posts})
def twitter(request):
    # Placeholder for the Twitter exercise — not implemented yet.
    a = 2
def google(request):
    # Placeholder for the Google+ exercise — not implemented yet.
    a = 2
| [
"laky55555@gmail.com"
] | laky55555@gmail.com |
3d9654063e7ca4dd1188f7023431c014a679a192 | f3b023931812ca0f37bb9fcaf930b8fda4b8609c | /natsr/dataloader.py | cea9d73ac58ad2f2d8f51d3587cd416908036a94 | [
"MIT"
] | permissive | kozistr/NatSR-pytorch | 91bbdc31f94f6a32886f922e9825f1a947509886 | 51b2b5ce9b1fdc0864a299243798a0f78eb7eedc | refs/heads/master | 2022-12-07T23:41:59.485090 | 2022-11-22T09:54:49 | 2022-11-22T09:54:49 | 251,505,048 | 7 | 3 | MIT | 2022-11-22T09:54:08 | 2020-03-31T05:02:47 | Python | UTF-8 | Python | false | false | 4,792 | py | import os
import random
from glob import glob
from math import sqrt
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from PIL import Image
from torch import cat
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import (
Compose,
RandomCrop,
Resize,
ToPILImage,
ToTensor,
)
from torchvision.transforms.functional import rotate
from natsr import DataSets, DataType, Mode, ModelType
from natsr.utils import get_blurry, get_noisy, is_gpu_available, is_valid_key
def get_scale_factor(scale: int) -> int:
    """Power-of-two scales pass through; otherwise the integer sqrt is used."""
    power_of_two = (scale & (scale - 1)) == 0
    if power_of_two:
        return scale
    return int(sqrt(scale))
def get_valid_crop_size(crop_size: int, scale: int) -> int:
return crop_size - (crop_size % scale)
def hr_transform(crop_size: int):
return Compose([RandomCrop(crop_size), ToTensor()])
def lr_transform(crop_size: int, scale: int):
return Compose(
[
ToPILImage(),
Resize(crop_size // scale, interpolation=Image.BICUBIC),
ToTensor(),
]
)
def get_nmd_data(
    img, scale: int, alpha: float, sigma: float, mode: str
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """Build the noisy/blurry/clean batch mixtures used by the NMD model.

    Bug fix: the return annotation was ``Optional[torch.Tensor, Tuple[...]]``,
    which is invalid — ``Optional`` takes a single argument, so the annotation
    raised ``TypeError`` when the module was imported. ``Union`` is correct.

    Training mode returns one batch (first quarter noisy, second quarter
    blurry, remaining half clean); validation mode returns two batches
    (blurry+clean, noisy+clean). Raises NotImplementedError otherwise.
    """
    batch_size: int = img.size(0)
    if mode == Mode.TRAIN:
        noisy_img = get_noisy(img[: batch_size // 4, :, :, :], sigma)
        blurry_img = get_blurry(
            img[batch_size // 4 : batch_size // 2, :, :, :], scale, alpha
        )
        clean_img = img[batch_size // 2 :, :, :, :]
        return cat([noisy_img, blurry_img, clean_img], dim=0)
    elif mode == Mode.VALID:
        noisy_img = get_noisy(img, sigma)
        blurry_img = get_blurry(img, scale, alpha)
        clean_img = img
        return (
            cat([blurry_img, clean_img], dim=0),
            cat([noisy_img, clean_img], dim=0),
        )
    raise NotImplementedError(f'[-] not supported mode : {mode}')
class DIV2KDataSet(Dataset):
    """DIV2K super-resolution dataset yielding (lr_image, hr_image) pairs.

    Each item is a randomly cropped, randomly rotated HR patch plus its
    bicubic-downscaled LR counterpart produced on the fly.
    """

    def __init__(self, config, data_type: str):
        self.config = config
        self.scale_factor: int = get_scale_factor(
            config['data'][DataSets.DIV2K]['scale']
        )
        # Crop size trimmed so it divides evenly by the scale factor.
        self.crop_size: int = get_valid_crop_size(
            config['model'][ModelType.FRSR]['height'], self.scale_factor
        )
        self.hr_image_paths: List[str] = []
        # NOTE(review): hr_images is initialized but never populated in the
        # code visible here — confirm whether it is still needed.
        self.hr_images: np.ndarray = np.array([], dtype=np.uint8)
        self.hr_transform = hr_transform(self.crop_size)
        self.lr_transform = lr_transform(self.crop_size, self.scale_factor)
        self._get_image_paths(data_type=data_type)

    def _get_image_paths(self, data_type: str) -> None:
        """Collect sorted HR image paths for the split; raise if missing."""
        dataset_path: str = self.config['data'][DataSets.DIV2K]['dataset_path']
        if os.path.exists(dataset_path):
            self.hr_image_paths = sorted(
                glob(
                    os.path.join(
                        dataset_path, f'DIV2K_{data_type}_HR', '*.png'
                    )
                )
            )
        else:
            raise FileNotFoundError(
                f'[-] there\'s no dataset at {dataset_path}'
            )

    def __getitem__(self, index: int):
        # Random 90-degree rotation augments the patch before cropping.
        hr_image = Image.open(self.hr_image_paths[index])
        hr_image = rotate(hr_image, random.choice([0, 90, 180, 270]))
        hr_image = self.hr_transform(hr_image)
        lr_image = self.lr_transform(hr_image)
        return lr_image, hr_image

    def __len__(self):
        return len(self.hr_image_paths)
def build_data_loader(
    config, data_type: str, override_batch_size: Optional[int] = None
) -> DataLoader:
    """Construct a ``DataLoader`` for the dataset selected in ``config``.

    :param data_type: dataset split name, e.g. ``train`` or ``valid``.
    :param override_batch_size: when given, replaces the configured batch size.
    :raises NotImplementedError: for unknown model or dataset types.
    """
    dataset_type: str = config['data']['dataset_type']
    model_type: str = config['model']['model_type']

    if not is_valid_key(config['model'], model_type):
        raise NotImplementedError(
            f'[-] not supported model_type : {model_type}'
        )

    if dataset_type == DataSets.DIV2K:
        dataset = DIV2KDataSet(config, data_type)
    else:
        raise NotImplementedError(
            f'[-] not supported dataset_type : {dataset_type}'
        )

    if override_batch_size is None:
        batch_size = config['model'][model_type]['batch_size']
    else:
        batch_size = override_batch_size

    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=is_gpu_available(),
        drop_last=False,
        num_workers=config['aux']['n_threads'],
    )
def build_loader(
    config, override_batch_size: Optional[int] = None
) -> Tuple[DataLoader, DataLoader]:
    """Build and return the (train, valid) loader pair.

    The override batch size only applies to the validation loader, matching
    the original call sites.
    """
    train_loader = build_data_loader(config, data_type=DataType.TRAIN.value)
    valid_loader = build_data_loader(
        config,
        data_type=DataType.VALID.value,
        override_batch_size=override_batch_size,
    )
    return train_loader, valid_loader
| [
"kozistr@gmail.com"
] | kozistr@gmail.com |
54fa847db262c80c74df746853501b408ac95069 | bbf9b9a382a427dbf90980f609f7ab14dd0511bc | /day9/ByLinkText.py | 2e03bc2356c78fc357be528bc44d57c0c424b807 | [] | no_license | aravindanath/MarvelAutomation | b916a73467ec479ecad67be8c268743feea98816 | 91ae916de90cf0f407eb83ff08ddfb477d8cbea2 | refs/heads/master | 2021-05-18T07:14:15.533717 | 2020-06-07T05:52:06 | 2020-06-07T05:52:06 | 251,174,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from day8 import LaunchBrowser as lp
import time
from selenium.webdriver.common.keys import Keys
# NOTE(review): `data` is assigned but never used below — looks like leftover
# scratch input for a different search.
data = "iphone SE2"
# Open Google, switch the interface language via the "Telugu" link, then
# search for "news" and press Enter.
lp.driver.get("https://www.google.com")
lp.driver.find_element_by_link_text("తెలుగు").click()
time.sleep(4)
lp.driver.find_element_by_name('q').send_keys("news",Keys.ENTER)
time.sleep(4)
lp.driver.quit()
| [
"aravindanath86@gmail.com"
] | aravindanath86@gmail.com |
1ed435bb36804248b862257ae25e0672980fa2c3 | 8f836e3c4add1af6311abd8c71d517847d29e8f9 | /python_learning/python_book_projectsPractice/web_application/homeworks/pizzeria_18_4/pizzas/migrations/0001_initial.py | ff016843b41af2ca9ca7f2800dc4638d5a34dd86 | [] | no_license | DanilWH/Python | f6282d5aff5d4fa79c1fd0f0108e6c0c3777a485 | b87319409a94e26faf084c22b1eb6a1d55458282 | refs/heads/master | 2021-01-03T21:23:02.305101 | 2020-03-11T16:20:27 | 2020-03-11T16:20:27 | 240,238,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Generated by Django 2.2.3 on 2019-07-23 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Pizza model/table."""
    # First migration for this app — no prior migration state required.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Pizza',
            fields=[
                # Auto-increment surrogate primary key added by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"danil-lomakin-02@mail.ru"
] | danil-lomakin-02@mail.ru |
a98c17ccd06a2ab20d73dde4cff5f5119ea749a8 | 43cdb5fc4202346394272926b37a0671b0862d42 | /winremote/modules/devices.py | 27d7e15aff7e6c2ca24ce6a1c3e3ab48d3706dcc | [] | no_license | machacekondra/winremote | 2d2b9f9bf94cb8340dbcc49a744d74d37a770a2c | 3e79f0f4b37e1a358b45eb22602f44da9068bfee | refs/heads/master | 2021-01-10T07:45:16.907796 | 2016-08-16T10:48:50 | 2016-08-16T10:48:50 | 43,779,439 | 6 | 3 | null | 2016-12-02T11:52:45 | 2015-10-06T21:20:24 | Python | UTF-8 | Python | false | false | 1,767 | py | """
This module implements work with devices via WMI.
"""
def list(session, attributes='Name,ConfigManagerErrorCode'):
    """
    Description: list every PnP device present on the windows machine

    :param session: instance of Windows, which holds session to win machine
    :type session: winremote.Windows
    :param attributes: comma delimited attribute names to return per device
    :type attributes: str
    :returns: one info dict per device
    :rtype: list of dict
    """
    wql = 'select %s from Win32_PnPEntity' % attributes
    return session._wmi.query(wql)
def status(session, name):
    """
    Description: check whether device @name reports a healthy state

    :param session: instance of Windows, which holds session to win machine
    :type session: winremote.Windows
    :param name: name of the device to check
    :type name: str
    :returns: True if the device's ConfigManagerErrorCode is 0, False
        otherwise (including when the device is not found)
    :rtype: bool
    """
    device = session._wmi.query_first(
        "select * from Win32_PnPEntity where Name = '%s'" % name
    )
    if not device or 'ConfigManagerErrorCode' not in device:
        return False
    return device['ConfigManagerErrorCode'] == '0'
def get(session, name, attributes='Name'):
    """
    Description: get basic info about windows device @name

    :param session: instance of Windows, which hold session to win machine
    :type session: winremote.Windows
    :param attributes: comma delimited name of attributes to be returned
        (bug fix: this parameter was accepted but silently ignored — the
        query always selected *; it is now honored, matching list())
    :type attributes: str
    :param name: name of the device to fetch info
    :type name: str
    :returns: info about device, None if device not found
    :rtype: dict
    """
    # NOTE: @name is interpolated into the WQL query unescaped; do not pass
    # untrusted input.
    return session._wmi.query_first(
        "select %s from Win32_PnPEntity where Name = '%s'" % (attributes, name)
    )
| [
"omachace@redhat.com"
] | omachace@redhat.com |
233cf17b20db9e8da29c2d67bd65024db0765681 | d0533b0574494b13606a557620f38f5a2c74ce16 | /venv/lib/python3.7/site-packages/sympy/matrices/tests/test_normalforms.py | 24475e4b219f83f5338425d08dd3529f0f06a589 | [
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft"
] | permissive | CatTiger/vnpy | af889666464ab661fb30fdb0e8f71f94ba2d1e41 | 7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b | refs/heads/master | 2020-09-26T00:37:54.123877 | 2020-07-13T10:15:46 | 2020-07-13T10:15:46 | 226,124,078 | 0 | 0 | MIT | 2020-04-21T03:02:20 | 2019-12-05T14:44:55 | C++ | UTF-8 | Python | false | false | 843 | py | from sympy import Symbol, Poly
from sympy.polys.solvers import RawMatrix as Matrix
from sympy.matrices.normalforms import invariant_factors, smith_normal_form
from sympy.polys.domains import ZZ, QQ
def test_smith_normal():
    # Integer matrix over ZZ: the Smith normal form diagonal carries the
    # invariant factors 1, 10, -30 (sign convention follows sympy).
    m = Matrix([[12, 6, 4,8],[3,9,6,12],[2,16,14,28],[20,10,10,20]])
    setattr(m, 'ring', ZZ)
    smf = Matrix([[1, 0, 0, 0], [0, 10, 0, 0], [0, 0, -30, 0], [0, 0, 0, 0]])
    assert smith_normal_form(m) == smf
    # Polynomial matrix over QQ[x]: check the invariant factors directly.
    x = Symbol('x')
    m = Matrix([[Poly(x-1), Poly(1, x),Poly(-1,x)],
            [0, Poly(x), Poly(-1,x)],
            [Poly(0,x),Poly(-1,x),Poly(x)]])
    setattr(m, 'ring', QQ[x])
    invs = (Poly(1, x), Poly(x - 1), Poly(x**2 - 1))
    assert invariant_factors(m) == invs
    # Non-square (1x2) case: SNF keeps the shape, gcd in the corner.
    m = Matrix([[2, 4]])
    setattr(m, 'ring', ZZ)
    smf = Matrix([[2, 0]])
    assert smith_normal_form(m) == smf
| [
"guozc@133.com"
] | guozc@133.com |
64ec78804a0e924b0fa62b824725c884cdeffb29 | eae3d77ac72c168cee7701462f1fc45d7d4dcd91 | /start1/1240_단순2진암호코드.py | 30d4af020c267c1134e39d99f98595abe3ab4d21 | [] | no_license | ByeongjunCho/Algorithm-TIL | ed2f018d50bd2483bd1175ff9bf7e91913c14766 | ad79125a1498915fe97c1d57ee6860b06c410958 | refs/heads/master | 2022-07-19T15:12:23.689319 | 2020-05-18T08:37:09 | 2020-05-18T08:37:09 | 256,399,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,506 | py | # def check(N, M):
# code = ['0001101', '0011001', '0010011', '0111101', '0100011', '0110001', '0101111', '0111011', '0110111', '0001011']
# code = {code[x]: x for x in range(len(code))}
# passwords = [0] * 8
# # 암호가 존재하는 위치 저장
# ys, ye = 0, 0 # 암호 시작, 암호 끝 행 위치
# for i in range(N):
# if ye:
# break
# elif not ys and int(arr[i]):
# ys = i
# elif ys and not int(arr[i]):
# ye = i - 1
#
# xs, xe = 0, 0 # 암호의 끝 열
# for j in range(M-1, -1, -1):
# if arr[ys][j] == '1':
# xe = j
# break
#
# xs = xe - 55
# start = xs
# for i in range(8):
# tmp = arr[ys][start:start+7]
# k = code.get(tmp)
# if k == None:
# return 0
# passwords[i] = k
# start += 7
# test = 0
# for i in range(0, 7, 2):
# test += passwords[i]
# test *= 3
# for i in range(1, 7, 2):
# test += passwords[i]
# test += passwords[-1]
#
# if test % 10:
# return 0
#
# for j in range(xs, xe + 1):
# for i in range(ys, ye):
# if arr[i][j] != arr[i+1][j]:
# return 0
#
# return sum(passwords)
#
#
# T = int(input())
# for tc in range(1, T+1):
# N, M = map(int, input().split()) # 세로, 가로
# arr = [input() for _ in range(N)]
# print('#{} {}'.format(tc, check(N, M)))
# 간단한 코드
# code = ['0001101', '0011001', '0010011', '0111101', '0100011', '0110001', '0101111', '0111011', '0110111', '0001011']
# code = {code[x]: x for x in range(len(code))}
#
# T = int(input())
# for tc in range(1, T+1):
# N, M = map(int, input().split()) # 세로, 가로
# arr = [input() for _ in range(N)]
#
# def find():
# # 끝나는 위치를 찾음
# for i in range(N):
# for j in range(M-1, 0, -1):
# if arr[i][j] == '0': continue
# pwd = []
# for s in range(j-56+1, j, 7):
# pwd.append(code[arr[i][s: s+7]])
#
# a = pwd[0] + pwd[2] + pwd[4] + pwd[6]
# b = pwd[1] + pwd[3] + pwd[5]
# if (a*3 + b) % 10 == 0:
# return a+b
# else:
# return 0
#
#
# print('#{} {}'.format(tc, find()))
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split()) # 세로, 가로
arr = [input() for _ in range(N)]
def find():
# 끝나는 위치를 찾음
for i in range(N):
j = M - 1
while j >= 0:
if arr[i][j] == '1' and arr[i-1][j] == '0':
pwd = []
for _ in range(8):
c2 = c3 = c4 = 0
while arr[i][j] == '0': j-1
while arr[i][j] == '1': c4, j = c4+1, j-1
while arr[i][j] == '0': c3, j = c3 + 1, j - 1
while arr[i][j] == '1': c2, j = c2 + 1, j - 1
MIN = min(c2, c3, c4)
pwd.append(P[(c2//MIN, c3//MIN, c4//MIN)])
j -= c1
b = pwd[0] + pwd[2] + pwd[4] + pwd[6]
a = pwd[1] + pwd[3] + pwd[5]
if (a*3 + b) % 10 == 0:
return a+b
else:
return 0
| [
"jjgk91@naver.com"
] | jjgk91@naver.com |
464fd54ef836816d3e9af6de1a8449fd7e305d75 | c6053ad14e9a9161128ab43ced5604d801ba616d | /Lemon/Python_Base/Lesson10_object_20181117/homework_04.py | 376628267c6c40688877c51fce0c9b6091931be0 | [] | no_license | HesterXu/Home | 0f6bdace39f15e8be26031f88248f2febf33954d | ef8fa0becb687b7b6f73a7167bdde562b8c539be | refs/heads/master | 2020-04-04T00:56:35.183580 | 2018-12-25T02:48:51 | 2018-12-25T02:49:05 | 155,662,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # -*- coding: utf-8 -*-
# @Time : 2018/11/17/13:35
# @Author : Hester Xu
# Email : xuruizhu@yeah.net
# @File : homework_04.py
# @Software : PyCharm
'''
4:按照以下要求定义一个游乐园门票类,并创建实例调用函数,
完成儿童和大人的总票价统计(人数不定,由你输入的人数个数来决定)
1)平日票价100元
2)周末票价为平日票价120%
3)儿童半价
'''
class Ticket:
    """Amusement-park ticket order for a group of adults and children.

    Pricing rules (per the exercise statement):
      * weekday adult ticket: 100 yuan
      * weekend adult ticket: 120% of the weekday price (120 yuan)
      * children always pay half the adult price
    """

    def __init__(self, time, adult_number, child_number):
        # `time` is expected to be the literal string "weekend" or "weekday";
        # anything other than "weekend" falls back to weekday pricing.
        self.time = time
        self.adult_number = adult_number
        self.child_number = child_number

    def get_price(self):
        """Print the group's total price and return it.

        Returning the total (the original returned None) is a
        backward-compatible addition that makes the class testable.
        """
        if self.time == "weekend":
            adult_price = 120
            child_price = 60
        else:
            adult_price = 100
            child_price = 50
        # Fixed local-variable typo: was `totla_price`.
        total_price = adult_price * self.adult_number + child_price * self.child_number
        print("{}个成人和{}个儿童的票价一共是:{}元".format(self.adult_number, self.child_number, total_price))
        return total_price
# Interactive driver: read the visit day and party sizes, then print the total.
time = input("请输入去公园的时间(weekend or weekday):")
# NOTE(review): eval() on raw user input is unsafe and unnecessary — int()
# would be the right conversion here.
adult_number = eval(input("请输入去公园的成人数量:"))
child_number = eval(input("请输入去公园的儿童数量:"))
p = Ticket(time,adult_number,child_number)
p.get_price()
| [
"xuruizhu@yeah.net"
] | xuruizhu@yeah.net |
a1afa2fac8e405059e85a2618a7878c4182aab03 | 45b644af6d0204ff337bf348c007fd036b0fd113 | /0x0B-python-input_output/11-student.py | 925d2a83ec4d8aa2b22e33509ed50e7ff56a1261 | [] | no_license | jaycer95/holbertonschool-higher_level_programming | b5e7f2e72a9da8242befa0945b2935ceea3a086e | 47882b8a4d8b78e09cb372a8b2b85440de2b2d5b | refs/heads/master | 2022-12-20T22:31:35.675364 | 2020-09-24T18:33:21 | 2020-09-24T18:33:21 | 259,335,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/python3
""" create a student class"""
class Student:
    """Represents a student with a first name, last name and age."""

    def __init__(self, first_name, last_name, age):
        """Store the student's identifying attributes."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def to_json(self):
        """Return the instance attribute dictionary (JSON-serializable)."""
        return self.__dict__
| [
"jacer.dabbabi95@gmail.com"
] | jacer.dabbabi95@gmail.com |
399a3d37af431b0d4b7205c68bef93e1a1222c45 | edccc564bf3699d7bab9a6b26c369ac85cd32555 | /misc/add_func.py | bba74174e82b95aac6210ab30759e4be88be0ecd | [
"LicenseRef-scancode-public-domain"
] | permissive | NAL-i5K/genomics-workspace | 7bb609b651b6118d3ce8aa49868d7372f5562956 | 013f44dd02980d34a00e4e9b667fa8fea6e824c5 | refs/heads/master | 2023-01-27T20:36:09.871710 | 2021-02-10T13:48:25 | 2021-02-10T13:48:25 | 72,758,632 | 14 | 6 | NOASSERTION | 2023-01-13T22:38:11 | 2016-11-03T15:20:11 | JavaScript | UTF-8 | Python | false | false | 4,539 | py | from blast.models import SequenceType
#from blast.models import BlastDb
from app.models import Organism
import os
import sys
import requests
#from hmmer.models import HmmerDB
def display_name(options):
    """Build the organism display name from options['Genus_Species'].

    Returns 'Genus species' — with an optional third, lower-cased epithet
    when three parts are supplied — or 0 when the entry cannot be
    subscripted (TypeError).
    """
    try:
        parts = options['Genus_Species']
        genus = parts[0].lower().capitalize()
        species = parts[1].lower()
    except TypeError:
        return 0
    name = genus + ' ' + species
    if len(parts) == 3:
        return name + ' ' + parts[2].lower()
    return name
def get_organism(display_name):
    """Look up the Organism row by display name, exiting on failure.

    Bug fix: the original truthiness check could never fire —
    ``Organism.objects.get`` raises ``DoesNotExist`` (and never returns a
    falsy value), so the error message/exit path was unreachable.  Catching
    the exception restores the intended behaviour.
    """
    try:
        return Organism.objects.get(display_name=display_name)
    except Organism.DoesNotExist:
        print("check your organism name again if it still fails then check your organism database")
        sys.exit(0)
def get_path(app_name, title):
    """Return the media-relative db path for ``title`` under ``app_name``.

    The file must exist under <project>/media/<app>/db/; otherwise an error
    is printed and the process exits.
    """
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    if app_name == 'blast':
        rel_path = os.path.join('blast/db', title)
    else:
        rel_path = os.path.join('hmmer/db', title)
    full_path = os.path.join(base_dir, 'media', rel_path)
    if os.path.isfile(full_path):
        return rel_path
    print("No fasta file in media/blast/db or media/hmmer/db")
    sys.exit(0)
def short_name(name):
    """Abbreviate 'Genus species' to the first three letters of each word."""
    words = name.split(' ')
    return words[0][:3] + words[1][:3]
def get_molecule(options):
    """Translate the CLI molecule type into the SequenceType code.

    Returns a (code, stored_code) pair where code is 'prot' or 'nucl'.
    Exits the process with a message on bad or missing input.
    """
    code_by_name = {'peptide': 'prot', 'nucleotide': 'nucl'}
    try:
        molecule = options['type'][0].lower()  # molecule type from command line
        if molecule not in code_by_name:
            print("please enter the correct molecule_type, must be nucleotide or peptide")
            sys.exit(0)
        molecule2 = code_by_name[molecule]
    except Exception:
        # Missing/malformed -t argument (note: sys.exit above is a
        # SystemExit, which this handler does NOT swallow).
        print("enter the argument complete '-t' '-f' ")
        sys.exit(0)
    matches = SequenceType.objects.filter(molecule_type=molecule2)
    first = matches[0]
    return molecule2, first.molecule_type
def get_dataset(options):
    """Normalize the CLI dataset type and return (dataset, stored_name).

    'Genome' entries carry a capitalized assembly qualifier from the next
    CLI token.  Exits with a message on an unknown dataset type.
    """
    dataset = options['type'][1].lower().capitalize()
    if dataset == 'Genome':
        dataset = dataset + ' ' + options['type'][2].lower().capitalize()
    elif dataset not in ('Transcript', 'Protein'):
        print('enter the correct dataset type')
        sys.exit(0)
    rows = SequenceType.objects.filter(dataset_type=dataset)
    first = rows[0]
    return dataset, str(first.dataset_type)
def get_type(dataset, molecule2, molecule_str, dataset_str):
    """Fetch the SequenceType row matching the molecule/dataset pair.

    The first two branches are consistency checks between the raw values
    and the strings previously read back from the database; on mismatch a
    diagnostic is printed and None is returned implicitly.
    """
    if molecule2 != molecule_str:
        print("something wrong in molecule")
    elif dataset != dataset_str:
        print("something wrong with dataset")
    else:
        try:
            rows = SequenceType.objects.filter(
                molecule_type=molecule2, dataset_type=dataset
            )
            return rows[0]
        except IndexError:
            # No such combination stored — report and exit.
            print("there are no {molecule} - {dataset} combination in the database".format(molecule=molecule2.capitalize(), dataset=dataset_str))
            sys.exit(0)
def get_description(url1, wiki_url2):
    """Fetch a Wikipedia extract for the first search hit of ``url1``.

    ``url1`` is a MediaWiki search-API URL; ``wiki_url2`` is the extract-API
    base to which the found page title is appended.  Exits on a missing
    search hit or a network failure.
    """
    try:
        search = requests.get(url1).json()
        try:
            hit = search['query']['search'][0]
            title = hit['title']
            page = requests.get(wiki_url2 + title).json()
            page_id = str(hit['pageid'])
            return page['query']['pages'][page_id]['extract']
        except IndexError:
            print("check your organism name again")
            sys.exit(0)
    except requests.exceptions.ConnectionError:
        print("check your internet connection")
        sys.exit(0)
def get_taxid(id_baseurl, name):
    """Resolve an NCBI taxonomy id for ``name`` via the esearch endpoint.

    Exits with a message when the name yields an empty id list.
    """
    try:
        payload = requests.get(id_baseurl + name).json()
        return int(payload['esearchresult']['idlist'][0])
    except IndexError:
        print("make sure your name is completed and correct")
        sys.exit(0)
def delete_org(name):
    """Delete all Organism rows whose display_name equals ``name``."""
    Organism.objects.filter(display_name=name).delete()
    return "remove %s in database" % name
| [
"vagrant@localhost.localdomain"
] | vagrant@localhost.localdomain |
4f0f69ccaea1e69b949c44d78167b885c304c83b | f125a883dbcc1912dacb3bf13e0f9263a42e57fe | /tsis5/part1/3.py | db2049f558022d1fc5fdf786334b167dba610d5e | [] | no_license | AruzhanBazarbai/pp2 | 1f28b9439d1b55499dec4158e8906954b507f04a | 9d7f1203b6735b27bb54dfda73b3d2c6b90524c3 | refs/heads/master | 2023-07-13T05:26:02.154105 | 2021-08-27T10:20:34 | 2021-08-27T10:20:34 | 335,332,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | # Write a Python program to append text to a file and display the text
# Write a Python program to append text to a file and display the text
def f_write(fname):
    """Write two fixed lines to ``fname``, then read the file back and print it.

    Bug fix: the read handle was previously opened with ``open()`` and never
    closed; both handles now use context managers so they are released
    deterministically.
    """
    with open(fname, "w") as f:
        f.write("11111111\n")
        f.write("22222222\n")
    with open(fname, "r") as txt:
        print(txt.read())


f_write("abs.txt")
| [
"aruzhanart2003@mail.ru"
] | aruzhanart2003@mail.ru |
06e40101df06c9ccf95a7737360d1f5dd8b2a557 | 229e1e103bc24dda4d8fef54b762009e19045a45 | /configs/nowd/abl/convbn/res101_nl_gc_nowd_innostd_ws1e0.py | 0da33195f3bb6184f370e936c48482241eb57950 | [
"MIT"
] | permissive | yinmh17/CCNet | c0be71919877c0d44c51cd8fd8ad8f644ef618a6 | d5e90fe5ccfa16389fd25bdd3e2160ffe2dfbd22 | refs/heads/master | 2020-06-18T13:03:46.781284 | 2019-11-12T06:26:59 | 2019-11-12T06:26:59 | 196,311,075 | 1 | 1 | MIT | 2019-07-21T19:48:39 | 2019-07-11T03:10:01 | Python | UTF-8 | Python | false | false | 1,135 | py | model = dict(
type='basenet',
pretrained='',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
block_num=[3, 4, 23, 3],
),
att=dict(
with_att=False,
type='glore',
att_stage=[False,False,True,False],
att_pos='after_add',
att_location=[[],[],[5,11,17],[]],
),
module=dict(
type='nl_nowd',
downsample=False,
whiten_type=['in_nostd'],
weight_init_scale=1.0,
with_unary=False,
with_gc=True,
with_nl=True,
nowd=['nl'],
use_out=True,
out_bn=True,
)
)
train_cfg = dict(
batch_size=8,
learning_rate=1e-2,
momentum=0.9,
num_steps=60000,
power=0.9,
random_seed=1234,
restore_from='./dataset/resnet101-imagenet.pth',
save_num_images=2,
start_iters=0,
save_from=59500,
save_pred_every=100,
snapshot_dir='snapshots/',
weight_decay=0.0005
)
data_cfg = dict(
data_dir='cityscapes',
data_list='./dataset/list/cityscapes/train.lst',
ignore_label=255,
input_size='769,769',
num_classes=19,
)
| [
"yaozhuliang13@gmail.com"
] | yaozhuliang13@gmail.com |
2c2b9eaa06d37224c0965868d3a8b2f6902e69ab | ce32e0e1b9568c710a3168abc3c638d6f9f6c31b | /prod/jobs/refill_binance_spot_bars.py | 066b8b16d8c33677259b33a48a960e20cf5c9842 | [
"MIT"
] | permissive | msincenselee/vnpy | 55ae76ca32cae47369a66bd2d6589c13d7a0bdd4 | 7f4fd3cd202712b083ed7dc2f346ba4bb1bda6d7 | refs/heads/vnpy2 | 2022-05-19T10:06:55.504408 | 2022-03-19T15:26:01 | 2022-03-19T15:26:01 | 38,525,806 | 359 | 158 | MIT | 2020-09-09T00:09:12 | 2015-07-04T07:27:46 | C++ | UTF-8 | Python | false | false | 4,703 | py | # flake8: noqa
import os
import sys
import csv
import pandas as pd
# 将repostory的目录i,作为根目录,添加到系统环境中。
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if ROOT_PATH not in sys.path:
sys.path.append(ROOT_PATH)
print(f'append {ROOT_PATH} into sys.path')
from datetime import datetime, timedelta
from vnpy.data.binance.binance_spot_data import BinanceSpotData, HistoryRequest, Exchange, Interval
from vnpy.trader.utility import get_csv_last_dt, append_data
# Fetch all Binance spot contracts, populating the local cache on first run.
spot_data = BinanceSpotData()
contracts = BinanceSpotData.load_contracts()
if len(contracts) == 0:
    spot_data.save_contracts()
    contracts = BinanceSpotData.load_contracts()
# Download start date (YYYYMMDD).
start_date = '20170101'
if __name__ == "__main__":
if len(sys.argv) >= 2:
interval = str(sys.argv[1]).lower()
if interval.isdecimal():
interval_num = int(sys.argv[1])
interval_type = Interval.MINUTE
else:
if 'm' in interval:
interval_type = Interval.MINUTE
interval_num = int(interval.replace('m', ''))
elif 'h' in interval:
interval_type = Interval.HOUR
interval_num = int(interval.replace('h', ''))
elif 'd' in interval:
interval_type = Interval.DAILY
interval_num = int(interval.replace('d', ''))
else:
interval = '1m'
interval_num = 1
interval_type = Interval.MINUTE
def download_symbol(symbol, start_dt, bar_file_path, interval_type, interval_num):
    """Download bars for ``symbol`` from ``start_dt`` and export them to CSV.

    NOTE(review): relies on the module-level globals ``contract_info`` (the
    current item of the download loop below) and ``spot_data`` — confirm
    before reusing this function outside that loop.
    """
    req = HistoryRequest(
        symbol=symbol,
        exchange=Exchange(contract_info.get('exchange')),
        interval=interval_type,
        interval_num=interval_num,
        start=start_dt
    )
    bars = spot_data.get_bars(req=req, return_dict=True)
    spot_data.export_to(bars, file_name=bar_file_path)
# Download/refresh bar data contract by contract.
for vt_symbol, contract_info in contracts.items():
    symbol = contract_info.get('symbol')
    # Only these two symbols are refreshed by this job.
    if symbol not in ['BTCUSDT', 'ETHUSDT']:
        continue
    bar_file_path = os.path.abspath(os.path.join(
        ROOT_PATH,
        'bar_data',
        'binance_spot',
        f'{symbol}_{start_date}_{interval}.csv'))
    # File absent: full download from the configured start date.
    if not os.path.exists(bar_file_path):
        print(f'文件{bar_file_path}不存在,开始时间:{start_date}')
        start_dt = datetime.strptime(start_date, '%Y%m%d')
        download_symbol(symbol, start_dt, bar_file_path, interval_type, interval_num)
        continue
    # File exists: find the timestamp of the last stored bar.
    last_dt = get_csv_last_dt(bar_file_path)
    # Timestamp unreadable: re-download from scratch.
    if last_dt is None:
        print(f'获取文件{bar_file_path}的最后时间失败,开始时间:{start_date}')
        start_dt = datetime.strptime(start_date, '%Y%m%d')
        download_symbol(symbol, start_dt, bar_file_path, interval_type, interval_num)
        continue
    # Otherwise fetch from the start of the last bar's day and append only
    # the rows newer than last_dt.
    start_dt = last_dt.replace(hour=0, minute=0, second=0, microsecond=0)
    print(f'文件{bar_file_path}存在,最后时间:{last_dt}, 调整数据获取开始时间:{start_dt}')
    req = HistoryRequest(
        symbol=symbol,
        exchange=Exchange(contract_info.get('exchange')),
        interval=interval_type,
        interval_num=interval_num,
        start=start_dt
    )
    bars = spot_data.get_bars(req=req, return_dict=True)
    if len(bars) <= 0:
        print(f'下载{symbol} {interval_num} {interval_type.value} 数据为空白')
        continue
    bar_count = 0
    # Recover the CSV header row so the appended rows keep the same columns.
    headers = []
    with open(bar_file_path, "r", encoding='utf8') as f:
        reader = csv.reader(f)
        for header in reader:
            headers = header
            break
    # Append every bar newer than the last stored timestamp.
    with open(bar_file_path, 'a', encoding='utf8', newline='\n') as csvWriteFile:
        writer = csv.DictWriter(f=csvWriteFile, fieldnames=headers, dialect='excel',
                                extrasaction='ignore')
        for bar in bars:
            if bar['datetime'] <= last_dt:
                continue
            bar_count += 1
            writer.writerow(bar)
    print(f'更新{symbol}数据 => 文件{bar_file_path}, 最后记录:{bars[-1]}')
| [
"incenselee@hotmail.com"
] | incenselee@hotmail.com |
20c7fddfa77a6b8ef7a7fb0847dedd60f878899d | c4a57dced2f1ed5fd5bac6de620e993a6250ca97 | /huaxin/huaxin_ui/ui_android_xjb_2_0/credit_card_reserved_pay_page.py | d67aea16c10dd49cdf86b6503a14c6c650015002 | [] | no_license | wanglili1703/firewill | f1b287b90afddfe4f31ec063ff0bd5802068be4f | 1996f4c01b22b9aec3ae1e243d683af626eb76b8 | refs/heads/master | 2020-05-24T07:51:12.612678 | 2019-05-17T07:38:08 | 2019-05-17T07:38:08 | 187,169,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | # coding=utf-8
import time
from _common.page_object import PageObject
from _common.xjb_decorator import robot_log
import huaxin_ui.ui_android_xjb_2_0.credit_card_repay_page
RESERVED_PAY="xpath_//android.widget.TextView[@text='信用卡还款']"
TRADE_PASSWORD = "xpath_//android.widget.EditText[@resource-id='com.shhxzq.xjb:id/trade_pop_password_et']"
CREDIT_CARD_SELECTED="xpath_//android.widget.RelativeLayout[@resource-id='com.shhxzq.xjb:id/rl_credit_item']"
RESERVED_PAY_AMOUNT="xpath_//android.widget.EditText[@text='请输入预约还款金额']"
RESERVED_PAY_DATE="xpath_//android.widget.TextView[@text='请选择信用卡还款日']"
DEDUCTION_DATE="xpath_//android.widget.TextView[@resource-id='com.shhxzq.xjb:id/tv_cr_deduction_date']"
RESERVED_PAY_DATE_MONTH="xpath_//android.view.View[@resource-id='com.shhxzq.xjb:id/month']"
RESERVED_PAY_DATE_DAY="xpath_//android.view.View[@resource-id='com.shhxzq.xjb:id/day']"
RESERVED_PAY_DATE_COMPELETED="xpath_//android.widget.TextView[@text='完成']"
RESERVED_PAY_COMFIRM="xpath_//android.widget.Button[@text='确认还款']"
RESERVED_PAY_DONE="xpath_//android.widget.Button[@text='确认']"
current_page=[]
class ReservedPayPage(PageObject):
    """Page object for the credit-card reserved (scheduled) repayment screen."""
    def __init__(self, web_driver):
        super(ReservedPayPage, self).__init__(web_driver)
        # Verify the expected elements exist before interacting with the page.
        self.elements_exist(*current_page)
    # Schedule a credit-card repayment (translated from: 信用卡预约还款)
    @robot_log
    def reserved_pay(self,reserved_pay_amount,trade_password):
        """Fill the amount, pick the repayment date, confirm with the trade
        password, and return the resulting CreditCardRepayPage."""
        self.perform_actions(RESERVED_PAY_AMOUNT,reserved_pay_amount,
                             RESERVED_PAY_DATE,
                             RESERVED_PAY_DATE_MONTH,
                             RESERVED_PAY_DATE_DAY,
                             RESERVED_PAY_DATE_COMPELETED,
                             RESERVED_PAY_COMFIRM,
                             TRADE_PASSWORD,trade_password,
                             RESERVED_PAY_DONE
                             )
        page=huaxin_ui.ui_android_xjb_2_0.credit_card_repay_page.CreditCardRepayPage(self.web_driver)
        return page
| [
"wanglili@shhxzq.com"
] | wanglili@shhxzq.com |
3c3c3a52869314e48ca1ff01f62c307cf14d182f | 0f5cccdf84bb02eafd7e18fbea2f1342bfd48185 | /arch/config/cmsis.py | 9f2345e3b38598ea5959fb283d3d0cccf0b8bba7 | [
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"LicenseRef-scancode-public-domain"
] | permissive | fb321/csp | c56632611a041d391c241d0ed5f0dc32c7387bed | 4963c6933e873073ac4db1837896f5ca087bcd94 | refs/heads/master | 2020-06-27T13:21:00.675587 | 2019-06-29T06:04:40 | 2019-07-02T13:31:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,879 | py | """*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
def instantiateComponent(cmsisComponent):
    """Register CMSIS core/DSP header files with the Harmony component.

    Reads the ARM.CMSIS pack descriptor for release information, then adds
    either the Cortex-M or Cortex-A core headers plus the DSP headers to
    the generated project.

    Fixes: the pack-descriptor file handle was opened and never closed (now
    a context manager), and ``str(eval('coreFile'))`` — a needless and
    unsafe eval of a literal variable name — is replaced by the variable
    itself.
    """
    cmsisInformation = cmsisComponent.createCommentSymbol("cmsisInformation", None)

    import xml.etree.ElementTree as ET
    with open(Variables.get("__CMSIS_PACK_DIR") + "/ARM.CMSIS.pdsc", "r") as cmsisDescriptionFile:
        cmsisDescription = ET.fromstring(cmsisDescriptionFile.read())
    cmsisInformation.setLabel("Release Information: " + str(cmsisDescription.iter("release").next().attrib))

    #check if it is a cortex M device
    archNode = ATDF.getNode('/avr-tools-device-file/devices')
    if ("m" in archNode.getChildren()[0].getAttribute("architecture").split("CORTEX-")[1].lower()):
        # e.g. CORTEX-M4 -> core_cm4.h
        coreFile = "core_c" + str(archNode.getChildren()[0].getAttribute("architecture").split("CORTEX-")[1].lower()) + ".h"
        # add core header files (eval removed: coreFile is already a str)
        headerFileNames = ["cmsis_compiler.h", "cmsis_iccarm.h", "cmsis_gcc.h", "tz_context.h", coreFile, "mpu_armv7.h", "cmsis_version.h"]
        #Cortex M23 has MPU v8
        if (archNode.getChildren()[0].getAttribute("architecture") == "CORTEX-M23"):
            headerFileNames.remove("mpu_armv7.h")
            headerFileNames.append("mpu_armv8.h")
        for headerFileName in headerFileNames:
            szSymbol = "{}_H".format(headerFileName[:-2].upper())
            headerFile = cmsisComponent.createFileSymbol(szSymbol, None)
            headerFile.setRelative(False)
            headerFile.setSourcePath(Variables.get("__CMSIS_PACK_DIR") + "/CMSIS/Core/Include/" + headerFileName)
            headerFile.setOutputName(headerFileName)
            headerFile.setMarkup(False)
            headerFile.setOverwrite(True)
            headerFile.setDestPath("../../packs/CMSIS/CMSIS/Core/Include/")
            headerFile.setProjectPath("packs/CMSIS/CMSIS/Core/Include/")
            headerFile.setType("HEADER")
    #assume this is a cortex A device
    else:
        headerFileNames = ["cmsis_compiler.h", "cmsis_gcc.h", "cmsis_iccarm.h", "cmsis_cp15.h", "core_ca.h"]
        # add core header files for cortex a devices
        for headerFileName in headerFileNames:
            szSymbol = "CORE_A_{}_H".format(headerFileName[:-2].upper())
            headerFile = cmsisComponent.createFileSymbol(szSymbol, None)
            headerFile.setRelative(False)
            headerFile.setSourcePath(Variables.get("__CMSIS_PACK_DIR") + "/CMSIS/Core_A/Include/" + headerFileName)
            headerFile.setOutputName(headerFileName)
            headerFile.setMarkup(False)
            headerFile.setOverwrite(True)
            headerFile.setDestPath("../../packs/CMSIS/CMSIS/Core_A/Include/")
            headerFile.setProjectPath("packs/CMSIS/CMSIS/Core_A/Include/")
            headerFile.setType("HEADER")
    # add dsp header files
    headerFileNames = ["arm_common_tables.h", "arm_const_structs.h", "arm_math.h"]
    for headerFileName in headerFileNames:
        szSymbol = "{}_H".format(headerFileName[:-2].upper())
        headerFile = cmsisComponent.createFileSymbol(szSymbol, None)
        headerFile.setRelative(False)
        headerFile.setSourcePath(Variables.get("__CMSIS_PACK_DIR") + "/CMSIS/DSP/Include/" + headerFileName)
        headerFile.setOutputName(headerFileName)
        headerFile.setMarkup(False)
        headerFile.setOverwrite(True)
        headerFile.setDestPath("../../packs/CMSIS/CMSIS/DSP/Include/")
        headerFile.setProjectPath("packs/CMSIS/CMSIS/DSP/Include/")
        headerFile.setType("HEADER")
| [
"http://support.microchip.com"
] | http://support.microchip.com |
94b209e024d003ffb3534976b6821966ef68c231 | 3eee3ee3b0dd5b5f50b0c40390fc0dfda36ccf90 | /examples/textrnn_classification_demo.py | 539ea657a33c75470aa2410ed235370a400b7393 | [
"Apache-2.0",
"Python-2.0"
] | permissive | shibing624/pytextclassifier | d36f514dee0a01c64a2e57d069344d8505cf2140 | daebd31cfbe92606da92f007ffba390475e73b16 | refs/heads/master | 2023-09-01T05:54:11.775314 | 2023-08-22T11:23:37 | 2023-08-22T11:23:37 | 89,688,656 | 263 | 31 | Apache-2.0 | 2023-09-11T12:46:43 | 2017-04-28T09:04:14 | Python | UTF-8 | Python | false | false | 1,502 | py | # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import sys
sys.path.append('..')
from pytextclassifier import TextRNNClassifier
if __name__ == '__main__':
    m = TextRNNClassifier(output_dir='models/textrnn-toy')
    # Toy (label, text) pairs: Chinese news headlines in two classes.
    data = [
        ('education', '名师指导托福语法技巧:名词的复数形式'),
        ('education', '中国高考成绩海外认可 是“狼来了”吗?'),
        ('education', '公务员考虑越来越吃香,这是怎么回事?'),
        ('sports', '图文:法网孟菲尔斯苦战进16强 孟菲尔斯怒吼'),
        ('sports', '四川丹棱举行全国长距登山挑战赛 近万人参与'),
        ('sports', '米兰客场8战不败国米10年连胜')
    ]
    # train and save best model
    m.train(data, num_epochs=3, evaluate_during_training_steps=1)
    print(m)
    # load best model from model_dir
    m.load_model()
    predict_label, predict_proba = m.predict(['福建春季公务员考试报名18日截止 2月6日考试',
                                              '意甲首轮补赛交战记录:米兰客场8战不败国米10年连胜'])
    print(f'predict_label: {predict_label}, predict_proba: {predict_proba}')
    # Held-out pairs used only for accuracy evaluation.
    test_data = [
        ('education', '福建春季公务员考试报名18日截止 2月6日考试'),
        ('sports', '意甲首轮补赛交战记录:米兰客场8战不败国米10年连胜'),
    ]
    acc_score = m.evaluate_model(test_data)
    print(f'acc_score: {acc_score}')
| [
"shibing624@126.com"
] | shibing624@126.com |
9b1471a8abc70f359d9fa154e922f6b368d53732 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2904/60641/301110.py | 1c0cdc02566ddb95f235e917e0595acde8b346c7 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | def main():
    # Read a signed integer as text and print it with its digits reversed,
    # e.g. "123" -> 321, "-123" -> -321 (leading zeros drop via int()).
    num = input()
    if num[0] == "-":
        # num[:0:-1] reverses everything after the leading sign character.
        print(int("-" + num[:0:-1]))
    else:
        print(int(num[::-1]))
if __name__ == '__main__':
    main()
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
fbdedf4bbdf5e1fff6af26c44e1308075f620afb | 1989c958e197c782f025e45554d39a3e302b1523 | /contact/forms.py | c55d183e7d08116ce3e976f071a435ac4529798d | [] | no_license | Levalife/django_lessons | 27f400ddc515102c62de39456b58b364c3ebfb80 | 0b313089741eb5ba8e6dead105240447585749e3 | refs/heads/master | 2021-01-13T01:40:05.965881 | 2013-07-02T10:08:07 | 2013-07-02T10:08:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | from django import forms
class ContactForm(forms.Form):
subject = forms.CharField(max_length=100)
email = forms.EmailField(required = False, label='Your e-mail address')
message = forms.CharField(widget=forms.Textarea)
def clean_message(self):
message = self.cleaned_data['message']
num_words = len(message.split())
if num_words < 4:
raise forms.ValidationError('Not enough words!')
return message | [
"levushka14@gmail.com"
] | levushka14@gmail.com |
7f5cdeff83e1e295b6b20393714452880925c6b7 | a65e5dc54092a318fc469543c3b96f6699d0c60b | /Personel/Siddhesh/Python/Mar19/Array4.py | 826bf25ff6300f430b57b88d40ae55e6c0e19e92 | [] | no_license | shankar7791/MI-10-DevOps | e15bfda460ffd0afce63274f2f430445d04261fe | f0b9e8c5be7b28298eb6d3fb6badf11cd033881d | refs/heads/main | 2023-07-04T15:25:08.673757 | 2021-08-12T09:12:37 | 2021-08-12T09:12:37 | 339,016,230 | 1 | 0 | null | 2021-08-12T09:12:37 | 2021-02-15T08:50:08 | JavaScript | UTF-8 | Python | false | false | 577 | py | #Changing and Adding Elements
#Arrays are mutable; their elements can be changed in a similar way as lists.
import array as arr
numbers = arr.array('i',[1, 2, 3, 4, 5, 6 ,10])
#changing first element
numbers[0]=0
print (numbers)
#changing 3rd to 5th element
numbers[2:5] = arr.array('i',[4 ,8 ,9])
print(numbers)
#we can add element using append() method
numbers.append(12)
print(numbers)
#we can add element using extennd() method
numbers.extend([7,9,5])
print(numbers)
#we can add element also using insert() method into array
numbers.insert(7, 13)
print(numbers)
| [
"shindesiddhesh07@gmail.com"
] | shindesiddhesh07@gmail.com |
4cef778c67f1af2bbf43a834f84a4ad272c1d7c0 | 3d90c79a7337bff78eb663ef8120e8279498155b | /30 Days of Code/Day 28 RegEx, Patterns, and Intro to Databases.py | 9b4f935dcfaa5d170c590032372d9ee8ae19e8f3 | [] | no_license | ikaushikpal/Hacker_Rank_Problems | b460f7c1d4bf331102239d13a9096ee5cd479d21 | 72e2f2168e1bcfdd267c9daec6da71d5aa44de52 | refs/heads/master | 2022-11-18T00:24:28.529594 | 2020-07-22T11:50:22 | 2020-07-22T11:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import math
import os
import random
import re
import sys
if __name__ == '__main__':
N = int(input())
id = []
for N_itr in range(N):
firstNameEmailID = input().split()
firstName = firstNameEmailID[0]
emailID = firstNameEmailID[1]
if '@gmail' in emailID:
id.append(firstName)
id.sort()
for i in id:
print(i)
| [
"iamkaushik2014@desktop.com"
] | iamkaushik2014@desktop.com |
5bafd0e38072c72e33f2894a208b8ac1c46f7594 | cb4e07b2a5dd30804ce428ec84d9e9f77709fcd5 | /swea/D3/SWEA_5201_컨테이너운반_구진범.py | 0c1b095c642a35ad82382f12e82dd9555f1aa8cc | [] | no_license | jbsam2/algo_problem | 141c17003e88a69afdeea93a723e7f27c4626fdc | 18f2cab5a9af2dec57b7fd6f8218badd7de822e4 | refs/heads/master | 2023-05-18T10:03:00.408300 | 2021-06-02T10:36:50 | 2021-06-02T10:36:50 | 282,104,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | for T in range(int(input())):
n,m=map(int,input().split());ret=0
w=sorted([*map(int,input().split())],reverse=True)
t=sorted([*map(int,input().split())],reverse=True)
for i in t:
for j in w:
if i>=j:ret+=j;w.remove(j);break
print(f'#{T+1}',ret) | [
"kbsam2@gmail.com"
] | kbsam2@gmail.com |
53882e755a639044d51f4ef49f066bb78922a0b9 | 7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14 | /airbyte-integrations/connector-templates/destination-python/integration_tests/integration_test.py | 836df2c8d66ef056068217bc8f8e89be3882e0fb | [
"MIT",
"Elastic-2.0"
] | permissive | Velocity-Engineering/airbyte | b6e1fcead5b9fd7c74d50b9f27118654604dc8e0 | 802a8184cdd11c1eb905a54ed07c8732b0c0b807 | refs/heads/master | 2023-07-31T15:16:27.644737 | 2021-09-28T08:43:51 | 2021-09-28T08:43:51 | 370,730,633 | 0 | 1 | MIT | 2021-06-08T05:58:44 | 2021-05-25T14:55:43 | Java | UTF-8 | Python | false | false | 1,171 | py | # MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def integration_test():
# TODO write integration tests
pass
| [
"noreply@github.com"
] | Velocity-Engineering.noreply@github.com |
d0e9940d5b58451bb8468f501adf11de55dca199 | 8dbb2a3e2286c97b1baa3ee54210189f8470eb4d | /kubernetes-stubs/client/models/v2beta1_resource_metric_status.pyi | 37f745aeb5fbb9ef0d7d17bc2065e18e6af5f79a | [] | no_license | foodpairing/kubernetes-stubs | e4b0f687254316e6f2954bacaa69ff898a88bde4 | f510dc3d350ec998787f543a280dd619449b5445 | refs/heads/master | 2023-08-21T21:00:54.485923 | 2021-08-25T03:53:07 | 2021-08-25T04:45:17 | 414,555,568 | 0 | 0 | null | 2021-10-07T10:26:08 | 2021-10-07T10:26:08 | null | UTF-8 | Python | false | false | 600 | pyi | import datetime
import typing
import kubernetes.client
class V2beta1ResourceMetricStatus:
current_average_utilization: typing.Optional[int]
current_average_value: str
name: str
def __init__(
self,
*,
current_average_utilization: typing.Optional[int] = ...,
current_average_value: str,
name: str
) -> None: ...
def to_dict(self) -> V2beta1ResourceMetricStatusDict: ...
class V2beta1ResourceMetricStatusDict(typing.TypedDict, total=False):
currentAverageUtilization: typing.Optional[int]
currentAverageValue: str
name: str
| [
"nikhil.benesch@gmail.com"
] | nikhil.benesch@gmail.com |
933f509f45c0fd0a83dfdb92b9c39cf33d4e37f7 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC2595.py | 5a64c80e92f317a9578fed16757ba858da82bd83 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,185 | py | # qubit number=4
# total number=30
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=27
prog.cz(input_qubit[0],input_qubit[3]) # number=28
prog.h(input_qubit[3]) # number=29
prog.x(input_qubit[3]) # number=15
prog.rx(1.8001325905069514,input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=16
prog.h(input_qubit[1]) # number=22
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.x(input_qubit[3]) # number=24
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.x(input_qubit[1]) # number=25
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.z(input_qubit[1]) # number=21
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.x(input_qubit[1]) # number=17
prog.cx(input_qubit[2],input_qubit[0]) # number=11
prog.y(input_qubit[0]) # number=12
prog.y(input_qubit[0]) # number=13
prog.z(input_qubit[2]) # number=26
prog.cx(input_qubit[2],input_qubit[1]) # number=23
prog.x(input_qubit[0]) # number=19
prog.x(input_qubit[0]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2595.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
a636a08716e18c4d09c625704f0d0e10db999f25 | ed4910769a51691f222a3e311215b014dc64ae3a | /wagtail/api/v2/filters.py | 42411f3c32464a76b5947023760b645c5fef406d | [
"BSD-3-Clause"
] | permissive | mr-const/wagtail | cba2db26a5b370aef2fc5dd41ca0f0ba95bf6536 | 091e26adfb9e4dc9bdf70be3572c104c356c664d | refs/heads/master | 2021-01-13T06:28:51.819517 | 2016-03-10T15:29:30 | 2016-03-10T15:29:30 | 53,587,500 | 0 | 0 | null | 2016-03-10T13:43:50 | 2016-03-10T13:43:50 | null | UTF-8 | Python | false | false | 7,094 | py | from django.conf import settings
from rest_framework.filters import BaseFilterBackend
from taggit.managers import _TaggableManager
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
from .utils import BadRequestError, pages_for_site
class FieldsFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This performs field level filtering on the result set
Eg: ?title=James Joyce
"""
fields = set(view.get_available_fields(queryset.model)).union({'id'})
for field_name, value in request.GET.items():
if field_name in fields:
field = getattr(queryset.model, field_name, None)
if isinstance(field, _TaggableManager):
for tag in value.split(','):
queryset = queryset.filter(**{field_name + '__name': tag})
# Stick a message on the queryset to indicate that tag filtering has been performed
# This will let the do_search method know that it must raise an error as searching
# and tag filtering at the same time is not supported
queryset._filtered_by_tag = True
else:
queryset = queryset.filter(**{field_name: value})
return queryset
class OrderingFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This applies ordering to the result set
Eg: ?order=title
It also supports reverse ordering
Eg: ?order=-title
And random ordering
Eg: ?order=random
"""
if 'order' in request.GET:
# Prevent ordering while searching
if 'search' in request.GET:
raise BadRequestError("ordering with a search query is not supported")
order_by = request.GET['order']
# Random ordering
if order_by == 'random':
# Prevent ordering by random with offset
if 'offset' in request.GET:
raise BadRequestError("random ordering with offset is not supported")
return queryset.order_by('?')
# Check if reverse ordering is set
if order_by.startswith('-'):
reverse_order = True
order_by = order_by[1:]
else:
reverse_order = False
# Add ordering
if order_by == 'id' or order_by in view.get_available_fields(queryset.model):
queryset = queryset.order_by(order_by)
else:
# Unknown field
raise BadRequestError("cannot order by '%s' (unknown field)" % order_by)
# Reverse order
if reverse_order:
queryset = queryset.reverse()
return queryset
class SearchFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This performs a full-text search on the result set
Eg: ?search=James Joyce
"""
search_enabled = getattr(settings, 'WAGTAILAPI_SEARCH_ENABLED', True)
if 'search' in request.GET:
if not search_enabled:
raise BadRequestError("search is disabled")
# Searching and filtering by tag at the same time is not supported
if getattr(queryset, '_filtered_by_tag', False):
raise BadRequestError("filtering by tag with a search query is not supported")
search_query = request.GET['search']
search_operator = request.GET.get('search_operator', None)
sb = get_search_backend()
queryset = sb.search(search_query, queryset, operator=search_operator)
return queryset
class ChildOfFilter(BaseFilterBackend):
"""
Implements the ?child_of filter used to filter the results to only contain
pages that are direct children of the specified page.
"""
def get_root_page(self, request):
return Page.get_first_root_node()
def get_page_by_id(self, request, page_id):
return Page.objects.get(id=page_id)
def filter_queryset(self, request, queryset, view):
if 'child_of' in request.GET:
try:
parent_page_id = int(request.GET['child_of'])
assert parent_page_id >= 0
parent_page = self.get_page_by_id(request, parent_page_id)
except (ValueError, AssertionError):
if request.GET['child_of'] == 'root':
parent_page = self.get_root_page(request)
else:
raise BadRequestError("child_of must be a positive integer")
except Page.DoesNotExist:
raise BadRequestError("parent page doesn't exist")
queryset = queryset.child_of(parent_page)
queryset._filtered_by_child_of = True
return queryset
class RestrictedChildOfFilter(ChildOfFilter):
"""
A restricted version of ChildOfFilter that only allows pages in the current
site to be specified.
"""
def get_root_page(self, request):
return request.site.root_page
def get_page_by_id(self, request, page_id):
site_pages = pages_for_site(request.site)
return site_pages.get(id=page_id)
class DescendantOfFilter(BaseFilterBackend):
"""
Implements the ?decendant_of filter which limits the set of pages to a
particular branch of the page tree.
"""
def get_root_page(self, request):
return Page.get_first_root_node()
def get_page_by_id(self, request, page_id):
return Page.objects.get(id=page_id)
def filter_queryset(self, request, queryset, view):
if 'descendant_of' in request.GET:
if getattr(queryset, '_filtered_by_child_of', False):
raise BadRequestError("filtering by descendant_of with child_of is not supported")
try:
parent_page_id = int(request.GET['descendant_of'])
assert parent_page_id >= 0
parent_page = self.get_page_by_id(request, parent_page_id)
except (ValueError, AssertionError):
if request.GET['descendant_of'] == 'root':
parent_page = self.get_root_page(request)
else:
raise BadRequestError("descendant_of must be a positive integer")
except Page.DoesNotExist:
raise BadRequestError("ancestor page doesn't exist")
queryset = queryset.descendant_of(parent_page)
return queryset
class RestrictedDescendantOfFilter(DescendantOfFilter):
"""
A restricted version of DecendantOfFilter that only allows pages in the current
site to be specified.
"""
def get_root_page(self, request):
return request.site.root_page
def get_page_by_id(self, request, page_id):
site_pages = pages_for_site(request.site)
return site_pages.get(id=page_id)
| [
"karlhobley10@gmail.com"
] | karlhobley10@gmail.com |
a9f7c3d75ffd672e277c27e70adee3e33c3e9510 | 8076de02ad53ea7b6328f819ae23e212f3a7d47c | /DXCTraining/Examples/4OOP/Inheritence/first.py | ada7ce1d8a65e48bcc5d991006dd997c7906d4cc | [] | no_license | rhitik26/python | f6013d978cbfc83c211b0e4e9aa92ee43a1b488f | b667c6502c6a1cb58b79ddd9d30a752f92da1f94 | refs/heads/master | 2020-09-21T12:34:03.869549 | 2019-11-29T06:28:33 | 2019-11-29T06:28:33 | 224,790,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | def y():
pass
class A:
sam='test'
class Person:
count=0
def __init__(self,name):
Person.count+=1
self.name=name
def sayHi(self):
print('Hi '+self.name)
class Emp(Person,A):
def __init__(self,name,id):
super().__init__(name)
self.id=id
def sayHi(self):
super().sayHi()
print('Hello '+self.name)
e1 = Emp('Saravan' ,'007')
e1.sayHi()
#e1.sayHi()
#print(e1.__dict__)
#print(Emp.__dict__)
#print(Person.__dict__)
#print(Emp.__bases__)
#z=type('Foo', (), {'attrib': 'value'}) #meta class
| [
"rkhanna36@dxc.com"
] | rkhanna36@dxc.com |
c2ac269526081ba4c09e510388a319650a8b9b24 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Basecamp/CompleteItem.py | 14b5086953de1a5f7782bc7413760851d2d6ada6 | [
"Apache-2.0",
"MIT"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,797 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# CompleteItem
# Marks a single, specified item in a To-do list as complete.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CompleteItem(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CompleteItem Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(CompleteItem, self).__init__(temboo_session, '/Library/Basecamp/CompleteItem')
def new_input_set(self):
return CompleteItemInputSet()
def _make_result_set(self, result, path):
return CompleteItemResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CompleteItemChoreographyExecution(session, exec_id, path)
class CompleteItemInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CompleteItem
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((required, string) A valid Basecamp account name. This is the first part of the account's URL.)
"""
super(CompleteItemInputSet, self)._set_input('AccountName', value)
def set_ItemID(self, value):
"""
Set the value of the ItemID input for this Choreo. ((required, integer) The ID of the item to mark as complete.)
"""
super(CompleteItemInputSet, self)._set_input('ItemID', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) The Basecamp account password. Use the value 'X' when specifying an API Key for the Username input.)
"""
super(CompleteItemInputSet, self)._set_input('Password', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) A Basecamp account username or API Key.)
"""
super(CompleteItemInputSet, self)._set_input('Username', value)
class CompleteItemResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CompleteItem Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (There is no structrued response from complete item requests.)
"""
return self._output.get('Response', None)
class CompleteItemChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CompleteItemResultSet(response, path)
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
f8feb419c39656afeff8e906cb8a45211147ee2b | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/webapp/modules/cms/models.py | 4e7ad86001019d94d76bc9f0a091acc07944b931 | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,740 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from hashlib import md5
from django.db import models
from django.contrib.auth.models import Group, User
from django.db.models import signals
from django.conf import settings
from django.db.models import F
from core import dateutil
#########################################################################
# Category:文章分类
#########################################################################
class Category(models.Model):
owner = models.ForeignKey(User, related_name='owned_cms_categories')
name = models.CharField(max_length=256) #分类名
pic_url = models.CharField(max_length=1024, default='') #分类图片
display_index = models.IntegerField(default=1, db_index=True) #显示的排序
created_at = models.DateTimeField(auto_now_add=True) #添加时间
class Meta(object):
db_table = 'cms_category'
verbose_name = '文章分类'
verbose_name_plural = '文章分类'
#########################################################################
# Article:文章
#########################################################################
class Article(models.Model):
owner = models.ForeignKey(User, related_name='owned_cms_articles')
title = models.CharField(max_length=256) #标题
summary = models.CharField(max_length=256, default='') #摘要
content = models.TextField(default='') #内容
display_index = models.IntegerField(default=1, db_index=True) #显示的排序
created_at = models.DateTimeField(auto_now_add=True) #添加时间
class Meta(object):
db_table = 'cms_article'
verbose_name = '文章'
verbose_name_plural = '文章'
#########################################################################
# SpecialArticle:特殊文章
#########################################################################
class SpecialArticle(models.Model):
owner = models.ForeignKey(User, related_name='owned_cms_special_articles')
name = models.CharField(max_length=256) #内部名
title = models.CharField(max_length=256) #标题
content = models.TextField(default='') #内容
display_index = models.IntegerField(default=1, db_index=True) #显示的排序
created_at = models.DateTimeField(auto_now_add=True) #添加时间
class Meta(object):
db_table = 'cms_special_article'
verbose_name = '特殊文章'
verbose_name_plural = '特殊文章'
#########################################################################
# CategoryHasArticle:<category, article>关系
#########################################################################
class CategoryHasArticle(models.Model):
article = models.ForeignKey(Article)
category = models.ForeignKey(Category)
class Meta(object):
db_table = 'cms_category_has_article'
| [
"jiangzhe@weizoom.com"
] | jiangzhe@weizoom.com |
5955abba13969d2e6dbf080aa32f43a83df0882d | cd257631f442d24d2e4902cfb60d05095e7c49ad | /week-03/day-03/centered_square.py | 718aa19f4966f147193973ccae71489f2fde4ccd | [] | no_license | green-fox-academy/Chiflado | 62e6fc1244f4b4f2169555af625b6bfdda41a975 | 008893c63a97f4c28ff63cab269b4895ed9b8cf1 | refs/heads/master | 2021-09-04T03:25:25.656921 | 2018-01-15T09:02:47 | 2018-01-15T09:02:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | from tkinter import *
root = Tk()
canvas = Canvas(root, width='300', height='300')
canvas.pack()
# draw a green 10x10 square to the center of the canvas.
canvas_width = 300
canvas_height = 300
edge_length = 10
starting_x = canvas_width / 2 - edge_length / 2
starting_y = canvas_height / 2 - edge_length / 2
ending_x = canvas_width / 2 + edge_length / 2
ending_y = canvas_height / 2 + edge_length / 2
green_square = canvas.create_rectangle( starting_x, starting_y, ending_x, ending_y, fill= 'green')
root.mainloop() | [
"prjevarabalazs@gmail.com"
] | prjevarabalazs@gmail.com |
705a6c711907b2fa6d7884a850e39de847ea32db | d750fb953abda6a965c4f307266b2405ad8c11b1 | /programers algorithm/LEVEL2/주식가격.py | 3c0fee793a8d39bb540a3b9695a7381a3d96d493 | [] | no_license | heaven324/Python | dbe8e57fa7741ab963af239474d108ff9dbdc0c7 | 065663fe1e5f86c9d08ec645e24b5fde2045fee1 | refs/heads/master | 2023-05-25T02:06:01.728138 | 2023-05-17T15:12:08 | 2023-05-17T15:12:08 | 188,010,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | # solution
prices = [1, 2, 3, 2, 3]
def solution(prices):
result = []
for i in range(len(prices)-1):
cnt = 1
for j in range(i+1, len(prices)-1):
if prices[i] <= prices[j]:
cnt += 1
else:
break
result.append(cnt)
result.append(0)
return result
print(solution(prices))
'''
문제 설명
초 단위로 기록된 주식가격이 담긴 배열 prices가 매개변수로 주어질 때,
가격이 떨어지지 않은 기간은 몇 초인지를 return 하도록 solution 함수를 완성하세요.
제한사항
prices의 각 가격은 1 이상 10,000 이하인 자연수입니다.
prices의 길이는 2 이상 100,000 이하입니다.
입출력 예
prices return
[1, 2, 3, 2, 3] [4, 3, 1, 1, 0]
입출력 예 설명
1초 시점의 ₩1은 끝까지 가격이 떨어지지 않았습니다.
2초 시점의 ₩2은 끝까지 가격이 떨어지지 않았습니다.
3초 시점의 ₩3은 1초뒤에 가격이 떨어집니다. 따라서 1초간 가격이 떨어지지 않은 것으로 봅니다.
4초 시점의 ₩2은 1초간 가격이 떨어지지 않았습니다.
5초 시점의 ₩3은 0초간 가격이 떨어지지 않았습니다.
※ 공지 - 2019년 2월 28일 지문이 리뉴얼되었습니다.
'''
| [
"wjdtjdgh2005@gmail.com"
] | wjdtjdgh2005@gmail.com |
b2b77cda68bc763a8f080a5688d500e6503eeee5 | 58c8838461101f2252d17824e924ece7e93212d7 | /tests/cloudcli/test_server_history.py | 6b50c457b3b890abce3ed7070e178294fb14528b | [
"MIT"
] | permissive | imcvampire/kamateratoolbox | 9b03ac703c1dd996de3faad5520220d4e7db91f2 | 372853059c584bb6b80c59efca125e08352def0e | refs/heads/master | 2023-02-03T17:23:00.052904 | 2020-12-22T07:19:17 | 2020-12-22T07:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | import datetime
from ..common import assert_only_one_server_cloudcli, assert_no_matching_servers_cloudcli, get_server_id
def test_server_history_only_one_server(cloudcli, session_server_powered_on, session_server_powered_off):
assert_only_one_server_cloudcli([session_server_powered_on, session_server_powered_off], cloudcli, ["server", "history"])
def test_server_history_no_matching_servers(cloudcli):
assert_no_matching_servers_cloudcli(cloudcli, ["server", "history"])
def test_server_history(cloudcli, temp_server):
print("Reboot server to have some history")
cloudcli("server", "reboot", "--name", temp_server["name"], "--wait")
res = cloudcli("server", "history", "--name", temp_server["name"], "--format", "json")
assert len(res) == 2
assert set(res[0].keys()) == {"date", "user", "action"}
assert datetime.datetime.strptime(res[0]["date"], "%d/%m/%Y %H:%M:%S").date() == datetime.datetime.now().date()
assert len(res[0]["user"]) > 3
assert len(res[0]["action"]) > 3
print("Get history by id")
res = cloudcli("server", "history", "--id", get_server_id(temp_server), "--format", "json")
assert len(res) == 2
assert set(res[0].keys()) == {"date", "user", "action"}
assert datetime.datetime.strptime(res[0]["date"], "%d/%m/%Y %H:%M:%S").date() == datetime.datetime.now().date()
assert len(res[0]["user"]) > 3
assert len(res[0]["action"]) > 3
| [
"ori@uumpa.com"
] | ori@uumpa.com |
bbf8b5718568d7b9ef2974b393b8ce361eeefe1f | 898f547bbeb7d1da27bc40e2d594a363c0d1a75a | /Leetcode Problems/lc1389e.py | bd96f9c3e848960611d528be93d5b379427f98f2 | [] | no_license | TerryLun/Code-Playground | 4e069e28c457309329f003ea249be83d7578a4a3 | 708ad69594cf5b9edc9ff1189716cad70916574c | refs/heads/master | 2023-06-20T14:03:43.924472 | 2021-07-23T05:27:48 | 2021-07-23T05:27:48 | 237,375,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | """
1389. Create Target Array in the Given Order
Given two arrays of integers nums and index. Your task is to create target array under the following rules:
Initially target array is empty.
From left to right read nums[i] and index[i], insert at index index[i] the value nums[i] in target array.
Repeat the previous step until there are no elements to read in nums and index.
Return the target array.
It is guaranteed that the insertion operations will be valid.
"""
def createTargetArray(nums, index):
target = []
for i, j in zip(index, nums):
if i >= len(target):
target.append(j)
else:
target.insert(i, j)
return target
| [
"tianweilun@yahoo.com"
] | tianweilun@yahoo.com |
1936160e12db29ad137d1f6effb6db365bd0ad5f | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_0/Python/MReni/RevengeOfThePancakes.py | f0bd9154a41a9e82d73ac0ce8564e51703f16a39 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,209 | py | import math
import collections
HAPPY_PANCAKE = '+'
UNHAPPY_PANCAKE = '-'
# pancake_min_flips = None
# pancake_map = {} # {pancake: {value: value, parents}}
pancake_value_map = {} #{pancake: steps to final}
pancake_parent_map = {} #{pancake: [parent1, parent2]}
super_parent = ""
def all_happy(pancakes):
return (UNHAPPY_PANCAKE not in pancakes)
def flip(pancakes, count):
    """Flip the first *count* pancakes: reverse their order and turn each over.

    Returns a new stack string; the remaining pancakes are unchanged.
    """
    inverted = [HAPPY_PANCAKE if side == UNHAPPY_PANCAKE else UNHAPPY_PANCAKE
                for side in pancakes[:count]]
    inverted.reverse()
    return ''.join(inverted) + pancakes[count:]
# def get_min_flips(pancakes, current_flips):
# if all_happy(pancakes):
# if not pancake_min_flips or pancake_min_flips > current_flips:
# pancake_min_flips = current_flips
# return current_flips
# min_flips = None
# while not all_happy(pancakes):
# for i in range(0, len(pancakes)):
# flipped_pancakes = flip(pancakes, i)
# current_flips = current_flips + 1
# if pancake_min_flips and current_flips > pancake_min_flips:
# continue
# # If we have seen this map and with smaller flips, use smaller flips
# if flipped_pancakes in pancake_map:
# if pancake_map[flipped_pancakes] > current_flips:
# final_flips = current_flips + pancake_map[flipped_pancakes]
# else:
# final_flips = get_min_flips(flipped_pancakes, current_flips)
# if current_min_flips and current_min_flips < final_flips:
# return current_flips
# if not min_flips or current_flips < min_flips :
# min_flips = current_flips
# return min_flips
# def get_min_flips(pancakes, depth):
# if all_happy(pancakes):
# # print "all happy"
# return 0
# if pancakes in pancake_map:
# return pancake_map[pancakes]
# current_min_flips = None
# for i in range(1, len(pancakes)+1):
# flipped_pancakes = flip(pancakes, i)
# # print "This is my local " + str(i) + " flip from " + pancakes + " to " + flipped_pancakes
# # print pancake_map
# if flipped_pancakes not in attempts or attempts[flipped_pancakes] > depth:
# attempts[flipped_pancakes] = depth
# future_min_flips = get_min_flips(flipped_pancakes, depth + 1)
# # print "Not in attempts and have a future min flip of " + str(future_min_flips)
# # print "Futures: " + str(i) + " :" + str(future_min_flips)
# # print "count: " + str(i) + " flipped_pancakes: " + flipped_pancakes + ". future_min_flips: " + str(future_min_flips)
# if future_min_flips != None:
# my_flip = 1 + future_min_flips
# # print "MYFlips: " + str(i) + " :" + str(my_flip)
# if pancakes not in pancake_map or pancake_map[pancakes] > my_flip:
# pancake_map[pancakes] = my_flip
# if current_min_flips == None or current_min_flips > my_flip:
# current_min_flips = my_flip
# # if current_min_flips != None and (pancakes not in pancake_map or pancake_map[pancakes] > current_min_flips):
# # pancake_map[pancakes] = current_min_flips
# return current_min_flips
def update_all_parents(pancakes, value, previous_value):
    """Propagate an improved score for *pancakes* up to every recorded parent.

    ``value`` is the new flip count stored for *pancakes*; ``previous_value``
    is the score it replaced (``None`` when this stack was unscored).  Each
    parent's candidate score is its old score adjusted by the improvement
    plus one (presumably: one extra flip to reach *pancakes* from the
    parent — TODO confirm the formula against the search driver).
    NOTE(review): mutates the module globals pancake_value_map /
    pancake_parent_map, which the driver loop resets per test case.
    """
    # old_parent_value + (value - previous_value + 1)
    # Previous value either > current value or does not exist or = current value
    if not pancakes:
        return
    for parent in pancake_parent_map[pancakes]:
        old_parent_value = pancake_value_map[parent]
        # `or 0` treats a still-unscored stack (None) as zero before adjusting.
        expected_new_value = (old_parent_value or 0) + (value - (previous_value or 0) + 1)
        if old_parent_value == None or old_parent_value > expected_new_value:
            pancake_value_map[parent] = expected_new_value
            # A parent's improvement may in turn improve its own parents.
            update_all_parents(parent, expected_new_value, old_parent_value)
def find_consecutive_chars(pancakes):
    """Length of the run of identical characters at the start of *pancakes*.

    Assumes a non-empty string (raises IndexError otherwise, like the
    original).
    """
    run = 1
    lead = pancakes[0]
    for ch in pancakes[1:]:
        if ch != lead:
            break
        run += 1
    return run
def get_min_flips(pancakes, depth):
    """Depth-first exploration of flip sequences, scoring stacks globally.

    ``depth`` is the smallest flip size to try at this level; because the
    recursive call passes the current flip size ``i``, flips are explored
    in non-decreasing size.  Scores live in the module-global
    pancake_value_map and propagate backwards through pancake_parent_map
    via update_all_parents.
    """
    if all_happy(pancakes):
        previous_value = pancake_value_map.get(pancakes, None)
        if previous_value == None or previous_value > 0:
            # Reaching the solved state costs 0 from here; notify parents.
            pancake_value_map[pancakes] = 0
            update_all_parents(pancakes, 0, previous_value)
        # return 0
    if pancake_value_map.get(pancakes) != None:
        # Already scored: re-broadcast the known score to any new parents.
        update_all_parents(pancakes, pancake_value_map[pancakes], pancake_value_map[pancakes])
    #find consecutive marks at the beginning:
    min_count = find_consecutive_chars(pancakes)
    # Start at the length of the leading run (never below the inherited
    # depth) — presumably a pruning heuristic on useful flip sizes; TODO
    # confirm the reasoning.
    for i in range(max(min_count, depth), len(pancakes) + 1):
        flipped_pancakes = flip(pancakes, i)
        if pancakes not in pancake_parent_map[flipped_pancakes] and flipped_pancakes not in pancake_value_map:# and flipped_pancakes != super_parent:
            # First time we reach this stack from here: record the edge
            # and recurse with the flip size as the new minimum depth.
            pancake_value_map[flipped_pancakes] = None
            pancake_parent_map[flipped_pancakes].add(pancakes)
            get_min_flips(flipped_pancakes, i )
# Driver (Python 2 syntax): one test case per line of the input file 'dataB'.
count = 1
with open('dataB', 'rb') as data:
    for pancakes in data:
        # pancake_min_flips = None
        # pancake_map[pancakes] = 0
        pancakes = pancakes.replace('\n', '')
        # Reset the per-case memo/graph globals used by get_min_flips.
        pancake_value_map = {pancakes: None}
        pancake_parent_map = collections.defaultdict(set)
        super_parent = pancakes
        # print "Case #" + str(count) + ": " + str(get_min_flips(pancakes, None))
        get_min_flips(pancakes, 1)
        print "Case #" + str(count) + ": " + str(pancake_value_map[pancakes])
        # print pancake_value_map
        # print pancake_parent_map
        # find_shortest(pancakes)
count = count + 1 | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
aba1814fbd4b650bf347f3f9336a1f8bc4df1fea | a7146e71459408498cc6b735935ba508a2e43c90 | /examples/long_running_with_tm/long_running_with_tm/models.py | 262843b6b4672b04747ee8cce37579b9fca74d95 | [
"MIT"
] | permissive | timgates42/pyramid_celery | 8ae5ed583696a35c35ddb1589a77444bec6362f6 | cf8aa80980e42f7235ad361874d3c35e19963b60 | refs/heads/master | 2023-03-15T23:17:01.816146 | 2021-02-24T02:40:04 | 2021-02-24T02:40:04 | 251,593,921 | 0 | 0 | NOASSERTION | 2020-03-31T12:18:55 | 2020-03-31T12:18:55 | null | UTF-8 | Python | false | false | 390 | py | from sqlalchemy import (
Column,
Integer,
Text,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
# Thread-local SQLAlchemy session factory shared by web requests and tasks.
DBSession = scoped_session(sessionmaker())
# Declarative base all ORM models in this example derive from.
Base = declarative_base()
class TaskItem(Base):
    """One row of the ``tasks`` table used by the long-running-task example."""
    __tablename__ = 'tasks'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Task payload/identifier; must be unique.
    task = Column(Text, unique=True)
| [
"sontek@gmail.com"
] | sontek@gmail.com |
6661c970ed8204123d5f74af8add6e78011ed805 | 75275e1cd5ef1a5dddd5fdcb82db03fdf1b609d3 | /lib/ansible/modules/cloud/alicloud/alicloud_slb_vsg_facts.py | f191838ff20a57f9e843348e165ea5a4860f141c | [
"Apache-2.0"
] | permissive | jumping/ansible-provider | bc8b2bc51aa422de89d255ba1208ba8e8ae8f0be | 067ce1aa4277720bc481c2ba08e3d1b408b8f13c | refs/heads/master | 2020-03-13T21:30:50.287049 | 2018-04-27T13:12:23 | 2018-04-27T13:12:23 | 131,297,789 | 0 | 0 | Apache-2.0 | 2018-04-27T13:12:24 | 2018-04-27T13:07:37 | Python | UTF-8 | Python | false | false | 7,586 | py | #!/usr/bin/python
# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin <heguimin36@163.com.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: alicloud_slb_vsg_facts
version_added: "2.4"
short_description: Gather facts on vserver group of Alibaba Cloud SLB.
description:
- This module fetches data from the Open API in Alicloud.
The module must be called from within the SLB vserver group itself.
options:
load_balancer_id:
description:
- ID of server load balancer.
required: true
aliases: [ "lb_id"]
vserver_group_ids:
description:
- A list of SLB vserver group ids.
required: false
aliases: [ "group_ids" ]
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Fetch slb server group according to setting different filters
- name: Fetch slb vserver group example
hosts: localhost
vars:
alicloud_access_key: <your-alicloud-access-key>
alicloud_secret_key: <your-alicloud-secret-key>
alicloud_region: cn-beijing
load_balancer_id: lb-dj1hv3n9oemvk34evb466
vserver_group_ids:
- rsp-dj1lrpsgr8d5v
- rsp-dj10xmgq31vl0
tasks:
- name: Find all vserver gorup in specified slb
alicloud_slb_vsg_facts:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
load_balancer_id: '{{ load_balancer_id }}'
register: all_vserver_group
- debug: var=all_vserver_group
- name: Find all vserver group by ids
alicloud_slb_vsg_facts:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
load_balancer_id: '{{ load_balancer_id }}'
vserver_group_ids: '{{ vserver_group_ids }}'
register: vserver_group_by_ids
- debug: var=vserver_group_by_ids
'''
RETURN = '''
vserver_group_ids:
description: List all vserver group's id after operating slb vserver group.
returned: when success
type: list
sample: [ "rsp-dj1lrpsgr8d5v", "rsp-dj10xmgq31vl0" ]
vserver_groups:
description: Details about the slb vserver group that were created.
returned: when success
type: list
sample: [
{
"backend_servers": {
"backend_server": [
{
"port": 8282,
"server_id": "i-2ze35dldjc05dcvezgwk",
"weight": 100
},
{
"port": 8283,
"server_id": "i-2zehjm3jvtbkp175c2bt",
"weight": 100
}
]
},
"vserver_group_id": "rsp-dj1lrpsgr8d5v",
"vserver_group_name": "group_1"
},
{
"backend_servers": {
"backend_server": [
{
"port": 8085,
"server_id": "i-2zehjm3jvtbkp175c2bt",
"weight": 100
},
{
"port": 8086,
"server_id": "i-2ze35dldjc05dcvezgwk",
"weight": 100
}
]
},
"vserver_group_id": "rsp-dj10xmgq31vl0",
"vserver_group_name": "group_2"
}
]
total:
description: The number of all vserver group after operating slb.
returned: when success
type: int
sample: 2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import get_acs_connection_info, ecs_argument_spec, slb_connect
# Feature flag: the footmark SDK is optional at import time; main() aborts
# with a clear error message when it is missing.
HAS_FOOTMARK = False
try:
    from footmark.exception import SLBResponseError
    HAS_FOOTMARK = True
except ImportError:
    HAS_FOOTMARK = False
def get_info(obj):
    """Build a plain dict describing a vserver-group object.

    The group id is always present; ``backend_servers`` and
    ``vserver_group_name`` are copied only when the footmark object
    actually carries them.

    :param obj: vserver group object returned by footmark
    :return: dict with the group id and any optional attributes
    """
    info = {'vserver_group_id': obj.vserver_group_id}
    for attr in ('backend_servers', 'vserver_group_name'):
        if hasattr(obj, attr):
            info[attr] = getattr(obj, attr)
    return info
def main():
    """Module entry point.

    Validates parameters, connects to Alibaba Cloud SLB and returns facts
    about the vserver groups of the given load balancer, optionally
    filtered by a user-supplied list of vserver group ids.  Exits through
    module.exit_json / module.fail_json.
    """
    argument_spec = ecs_argument_spec()
    argument_spec.update(dict(
        load_balancer_id=dict(type='str', aliases=['lb_id'], required=True),
        vserver_group_ids=dict(type='list', aliases=['group_ids'])
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if HAS_FOOTMARK is False:
        module.fail_json(msg="Package 'footmark' required for this module.")

    load_balancer_id = module.params['load_balancer_id']
    vserver_group_ids = module.params['vserver_group_ids']
    ids = []
    result = []
    all_vserver_group_ids = []

    # BUG FIX: the original wrote
    #   (not isinstance(...) or len(...)) < 1
    # which compares the whole boolean/int with 1, so the validation never
    # fired for non-list values.  The `< 1` belongs to len() alone.
    if vserver_group_ids and (not isinstance(vserver_group_ids, list) or len(vserver_group_ids) < 1):
        module.fail_json(msg='vserver_group_ids should be a list of vserver group ids, aborting')

    try:
        slb = slb_connect(module)
        # (renamed from the original's misspelled `laod_balancer`)
        load_balancers = slb.describe_load_balancers(load_balancer_id=load_balancer_id)
        if load_balancers and len(load_balancers) == 1:
            # list all vserver groups in the selected load balancer
            for vserver_group_obj in slb.describe_vserver_groups(load_balancer_id=load_balancer_id):
                all_vserver_group_ids.append(vserver_group_obj.vserver_group_id)
            if vserver_group_ids:
                # Only report the groups the user asked for, silently
                # skipping ids that do not belong to this load balancer.
                for vserver_group_id in vserver_group_ids:
                    if vserver_group_id in all_vserver_group_ids:
                        vserver_group = slb.describe_vserver_group_attribute(vserver_group_id)
                        result.append(get_info(vserver_group))
                        ids.append(vserver_group_id)
            else:
                # No filter given: report every vserver group of the SLB.
                for vserver_group_id in all_vserver_group_ids:
                    vserver_group = slb.describe_vserver_group_attribute(vserver_group_id)
                    result.append(get_info(vserver_group))
                    ids.append(vserver_group.vserver_group_id)
            module.exit_json(changed=False, vserver_group_ids=ids,
                             vserver_groups=result, total=len(result))
        else:
            module.fail_json(msg="Unable to describe slb vserver groups, invalid load balancer id")
    except Exception as e:
        module.fail_json(msg="Unable to describe slb vserver group, error:{0}".format(e))
if __name__ == '__main__':
main()
| [
"guimin.hgm@alibaba-inc.com"
] | guimin.hgm@alibaba-inc.com |
800eb0325b5ed7eb9f9f40151ce5297efa61fbbd | bab4f301ff7b7cf0143d82d1052f49e8632a210e | /98. Validate Binary Search Tree.py | bc811609d234bd06fd090df9f688e7930a212199 | [] | no_license | ashish-c-naik/leetcode_submission | 7da91e720b14fde660450674d6ce94c78b1150fb | 9f5dcd8e04920d07beaf6aa234b9804339f58770 | refs/heads/master | 2020-04-05T05:12:03.656621 | 2019-06-08T17:30:22 | 2019-06-08T17:30:22 | 156,585,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool

        A tree is a valid BST iff every node's value lies strictly between
        the bounds imposed by its ancestors (duplicates are rejected).
        """
        def in_bounds(node, lower, upper):
            # An empty subtree never violates any bound.
            if not node:
                return True
            if not (lower < node.val < upper):
                return False
            # Left values must stay below node.val, right values above it.
            return (in_bounds(node.left, lower, node.val) and
                    in_bounds(node.right, node.val, upper))
        return in_bounds(root, float('-inf'), float('inf'))
"ashishnaik121@gmail.com"
] | ashishnaik121@gmail.com |
23d4e5aa40696d6550bcbaa74f288227bb8f13b0 | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/ir/inference/test_groupnorm_act_pass_fuse_pass.py | c9f821b21d4e93fb3e366f54c555278279cf2643 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 4,554 | py | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import partial
import hypothesis.strategies as st
import numpy as np
from auto_scan_test import PassAutoScanTest
from program_config import OpConfig, ProgramConfig, TensorConfig
import paddle.inference as paddle_infer
class TestElementGNActPass(PassAutoScanTest):
    # Graph pattern exercised by `groupnorm_act_pass`:
    #
    #     |           fuse         |
    #  groupnorm    ->       groupnorm(with_silu)
    #     |                        |
    #    silu
    #     |
    #
    def sample_predictor_configs(self, program_config):
        """Yield the predictor config (TensorRT, FP16, dynamic shape) to test under."""
        # trt dynamic_shape
        config = self.create_trt_inference_config()
        config.enable_tensorrt_engine(
            max_batch_size=1,
            workspace_size=102400,
            min_subgraph_size=0,
            precision_mode=paddle_infer.PrecisionType.Half,
            use_static=False,
            use_calib_mode=False,
        )
        # Dynamic-shape bounds for the TRT engine; presumably
        # (min, max, opt) input shapes — TODO confirm argument order.
        config.set_trt_dynamic_shape_info(
            {
                "input_data": [1, 160, 1, 1],
            },
            {
                "input_data": [4, 1280, 64, 64],
            },
            {
                "input_data": [1, 320, 32, 32],
            },
        )
        # Expected op list after fusion plus (atol, rtol) tolerances.
        yield config, ['group_norm'], (3e-3, 1e-3)
    def sample_program_config(self, draw):
        """Draw a random group_norm -> silu program for the auto-scan harness."""
        # NOTE(review): `axis` is drawn but never used below.
        axis = draw(st.sampled_from([0, -1]))
        epsilon = draw(st.floats(min_value=0.0000001, max_value=0.001))
        batch_size = draw(st.integers(min_value=1, max_value=4))
        groups = draw(st.sampled_from([4, 8, 16, 32]))
        hw = draw(st.sampled_from([1, 8, 16, 32]))
        channel = draw(st.sampled_from([320, 1280]))
        def generate_input(attrs):
            # Random NCHW input of shape [batch_size, channel, hw, hw].
            return np.random.random(
                [attrs[1]["batch_size"], *attrs[1]["input_dim"]]
            ).astype(np.float32)
        def generate_weight(attrs):
            # Per-channel scale/bias vector of length `channel`.
            return np.random.random(attrs[1]['input_dim'][0]).astype(np.float32)
        attrs = [
            {
                'epsilon': epsilon,
                'groups': groups,
            },
            {
                'batch_size': batch_size,
                'input_dim': [channel, hw, hw],
            },
        ]
        group_norm_op = OpConfig(
            type="group_norm",
            inputs={
                "X": ["input_data"],
                "Bias": ["group_norm_bias"],
                "Scale": ["group_norm_scale"],
            },
            outputs={
                "Y": ["group_norm_output1"],
                "Mean": ["group_norm_output2"],
                "Variance": ["group_norm_output3"],
            },
            attrs={
                "data_layout": "NCHW",
                "groups": attrs[0]["groups"],
                "epsilon": attrs[0]["epsilon"],
            },
        )
        silu_op = OpConfig(
            type="silu",
            inputs={
                "X": ["group_norm_output1"],
            },
            outputs={
                "Out": ["silu_output"],
            },
        )
        program_config = ProgramConfig(
            ops=[
                group_norm_op,
                silu_op,
            ],
            weights={
                "group_norm_bias": TensorConfig(
                    data_gen=partial(generate_weight, attrs)
                ),
                "group_norm_scale": TensorConfig(
                    data_gen=partial(generate_weight, attrs)
                ),
            },
            inputs={
                "input_data": TensorConfig(
                    data_gen=partial(generate_input, attrs)
                ),
            },
            outputs=["silu_output"],
        )
        return program_config
    def test(self):
        """Run the auto-scan: apply `groupnorm_act_pass` and compare outputs."""
        self.run_and_statis(
            quant=False,
            max_examples=50,
            passes=["groupnorm_act_pass"],
            max_duration=250,
            min_success_num=50,
        )
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
5d2f65939d336b320d6390d414834f0d5e24b0be | 2ce3ef971a6d3e14db6615aa4da747474d87cc5d | /练习/CMB/02_03 string_test/string_test_lyh.py | 153d9a8d0141a302596cb45801712394292ac494 | [] | no_license | JarvanIV4/pytest_hogwarts | 40604245807a4da5dbec2cb189b57d5f76f5ede3 | 37d4bae23c030480620897583f9f5dd69463a60c | refs/heads/master | 2023-01-07T09:56:33.472233 | 2020-11-10T15:06:13 | 2020-11-10T15:06:13 | 304,325,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | import re
string1="LiuYongHua"
def reverse_string(str):
    """Return *str* reversed with every letter's case swapped.

    str.swapcase performs in one C-level pass the per-character case
    inversion the original loop did by hand.  The parameter keeps its
    original name ``str`` (shadowing the builtin) to preserve the
    existing keyword interface.
    """
    return str.swapcase()[::-1]
#print(reverse_string(string1))
string2="agf23bss43dsfds6fd4"
def new_num_str(str):
    """Return only the ASCII digit characters of *str*, in original order."""
    return "".join(char for char in str if "0" <= char <= "9")
#print(new_num_str(string2))
string3="DSabABaassBA"
string4="ab"
def count_str(str1, str2):
    """Count non-overlapping, case-insensitive occurrences of *str2* in *str1*."""
    return str1.upper().count(str2.upper())
print(count_str(string3,string4)) | [
"2268035948@qq.com"
] | 2268035948@qq.com |
42b65f1d1f9cc2d93d1e76c4b9d4e9c9f6f48bba | 22956a21b0b3ffe69c5618a7ef53683e4f73b483 | /busstopped-gae/lib/wtforms/__init__.py | 954ff22aba94d1172ff9785c67c417f1fb682cc4 | [] | no_license | humitos/bus-stopped | b397c3c47d8bd4b0b713389b3a0f47b7aa573762 | e49e6ce0b20ebc5f19fb7374216c082b0b12a962 | refs/heads/master | 2021-01-17T05:53:51.795324 | 2011-03-28T15:11:27 | 2011-03-28T15:11:27 | 1,435,952 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
WTForms
=======
WTForms is a flexible forms validation and rendering library for python web
development.
:copyright: Copyright (c) 2010 by Thomas Johansson, James Crasta and others.
:license: BSD, see LICENSE.txt for details.
"""
from wtforms import validators, widgets
from wtforms.fields import *
from wtforms.form import Form
from wtforms.validators import ValidationError
__version__ = '0.6.3dev'
| [
"humitos@gmail.com"
] | humitos@gmail.com |
380f5cd110c93d9c0100e713830642b815bad83d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/51/usersdata/82/21229/submittedfiles/listas.py | 9397d4c26e6dfec440597fe791ca4bf0c5754c4b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
def maiorDegrau(a):
for i in range (0,len(a)-1,1):
degrau = math.fabs(a[i]-a[i+1])
if degrau>maiorDegrau:
maior=maiorDegrau
return
a=[]
n = input ('Digite o valor de n:')
for i in range (0,n,1):
a.append (input('Digite o valor de a:'))
if maiorDegrau(a):
print ('S')
else:
print ('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d0087db78a7bc3a955916ca6d7840764df89d2e4 | 27acd9eeb0d2b9b6326cc0477e7dbb84341e265c | /test/vraag4/src/yahtzee/43.py | d64a07e55e549fa6d710248f28ee87831f6d9bf7 | [] | no_license | VerstraeteBert/algos-ds | e0fe35bc3c5b7d8276c07250f56d3719ecc617de | d9215f11cdfa1a12a3b19ade3b95fa73848a636c | refs/heads/master | 2021-07-15T13:46:58.790446 | 2021-02-28T23:28:36 | 2021-02-28T23:28:36 | 240,883,220 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | def histogram(stenen):
histogram={}
stenen.sort()
for element in stenen:
aantal=stenen.count(element)
histogram[element]=aantal
return histogram
def max_gelijk(stenen):
    """Return the largest number of identical dice in *stenen* (0 if empty).

    Counts in a single O(n) pass instead of calling list.count inside the
    loop (O(n^2)).  Also stops shadowing the builtin ``max``.
    """
    counts = {}
    best = 0
    for steen in stenen:
        counts[steen] = counts.get(steen, 0) + 1
        if counts[steen] > best:
            best = counts[steen]
    return best
def is_FullHouse(stenen):
    """True when the dice contain a three-of-a-kind plus a (distinct) pair.

    Same contract as the original — some value occurs exactly 3 times and
    another exactly 2 — but computed from one O(n) frequency pass instead
    of calling list.count per element (O(n^2)).
    """
    counts = {}
    for steen in stenen:
        counts[steen] = counts.get(steen, 0) + 1
    frequencies = counts.values()
    return 3 in frequencies and 2 in frequencies
"bertverstraete22@gmail.com"
] | bertverstraete22@gmail.com |
5c3c7c56558b2a063516442c59fdfda684584660 | f8eefef177c4794392ddbad008a67b10e14cb357 | /common/python/ax/kubernetes/swagger_client/models/v1_config_map_volume_source.py | fd67a4624e8f6be40321995d40eca7c9cc399a89 | [
"Apache-2.0"
] | permissive | durgeshsanagaram/argo | 8c667c7e64721f149194950f0d75b27efe091f50 | 8601d652476cd30457961aaf9feac143fd437606 | refs/heads/master | 2021-07-10T19:44:22.939557 | 2017-10-05T18:02:56 | 2017-10-05T18:02:56 | 105,924,908 | 1 | 0 | null | 2017-10-05T18:22:21 | 2017-10-05T18:22:20 | null | UTF-8 | Python | false | false | 6,776 | py | # coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ConfigMapVolumeSource(object):
    """
    Swagger model for the Kubernetes ``v1.ConfigMapVolumeSource`` type
    (adapts a ConfigMap into a volume).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, name=None, items=None, default_mode=None, optional=None):
        """
        V1ConfigMapVolumeSource - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> swagger type; consumed by the API client's
        # (de)serialization machinery.
        self.swagger_types = {
            'name': 'str',
            'items': 'list[V1KeyToPath]',
            'default_mode': 'int',
            'optional': 'bool'
        }
        # Python attribute name -> JSON key in the REST payload.
        self.attribute_map = {
            'name': 'name',
            'items': 'items',
            'default_mode': 'defaultMode',
            'optional': 'optional'
        }
        self._name = name
        self._items = items
        self._default_mode = default_mode
        self._optional = optional
    @property
    def name(self):
        """
        Gets the name of this V1ConfigMapVolumeSource.
        Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names

        :return: The name of this V1ConfigMapVolumeSource.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this V1ConfigMapVolumeSource.
        Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names

        :param name: The name of this V1ConfigMapVolumeSource.
        :type: str
        """
        self._name = name
    @property
    def items(self):
        """
        Gets the items of this V1ConfigMapVolumeSource.
        If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.

        :return: The items of this V1ConfigMapVolumeSource.
        :rtype: list[V1KeyToPath]
        """
        return self._items
    @items.setter
    def items(self, items):
        """
        Sets the items of this V1ConfigMapVolumeSource.
        If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.

        :param items: The items of this V1ConfigMapVolumeSource.
        :type: list[V1KeyToPath]
        """
        self._items = items
    @property
    def default_mode(self):
        """
        Gets the default_mode of this V1ConfigMapVolumeSource.
        Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.

        :return: The default_mode of this V1ConfigMapVolumeSource.
        :rtype: int
        """
        return self._default_mode
    @default_mode.setter
    def default_mode(self, default_mode):
        """
        Sets the default_mode of this V1ConfigMapVolumeSource.
        Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.

        :param default_mode: The default_mode of this V1ConfigMapVolumeSource.
        :type: int
        """
        self._default_mode = default_mode
    @property
    def optional(self):
        """
        Gets the optional of this V1ConfigMapVolumeSource.
        Specify whether the ConfigMap or its keys must be defined

        :return: The optional of this V1ConfigMapVolumeSource.
        :rtype: bool
        """
        return self._optional
    @optional.setter
    def optional(self, optional):
        """
        Sets the optional of this V1ConfigMapVolumeSource.
        Specify whether the ConfigMap or its keys must be defined

        :param optional: The optional of this V1ConfigMapVolumeSource.
        :type: bool
        """
        self._optional = optional
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize swagger models held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize swagger models stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1ConfigMapVolumeSource):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"elee@applatix.com"
] | elee@applatix.com |
60c9c060f101e760e8b9acf844231a85212ed025 | 4ad94b71e30883d6df07a3277265bd6fb7457ba7 | /python/examples/working_with_datasets/polygons2.py | 5f0987ea17d5e5bedab0c3c064af72fa075101de | [
"MIT"
] | permissive | Tecplot/handyscripts | 7cb1d4c80f323c785d06b0c8d37aeb0acb67f58c | 84a89bfecff5479a0319f08eb8aa9df465283830 | refs/heads/master | 2023-08-22T15:29:22.629644 | 2023-08-12T01:19:59 | 2023-08-12T01:19:59 | 149,826,165 | 89 | 64 | MIT | 2022-01-13T01:11:02 | 2018-09-21T22:47:23 | Jupyter Notebook | UTF-8 | Python | false | false | 4,514 | py | """Polygonal Finite-element Data Creation (Part 2)
This script creates a quad of two triangles just as in Part 1, however they are
placed into two different zones. Boundary connections are then made to stitch
the two triangles together.
The data created looks like this::
Node positions (x,y,z):
(1,1,1)
*
/ \
/ \
(0,1,.5) *-----* (1,0,.5)
\ /
\ /
*
(0,0,0)
The two triangles will have separate nodes at the shared locations::
Nodes:
1
Zone 1: / \
/ \
2-----0
2-----1
\ /
Zone 0: \ /
0
The left/right element indices are zero-based. A value of :math:`-1` indicates
no neighboring element while values :math:`(-2, -3, -4 ...)` indicate indices
into the boundary elements array :math:`(0, 1, 2 ...)`.
"""
import itertools as it
import tecplot as tp
from tecplot.constant import *
# Run this script with "-c" to connect to Tecplot 360 on port 7600
# To enable connections in Tecplot 360, click on:
#   "Scripting..." -> "PyTecplot Connections..." -> "Accept connections"
import sys
if '-c' in sys.argv:
    tp.session.connect()
# First Triangle
# Nodes are in (x,y,z)
nodes0 = ((0, 0, 0), (1, 0, 0.5), (0, 1, 0.5))
# One scalar value per node (the nodal variable 's').
scalar_data0 = (0, 1, 2)
# Triangle faces (lines)
faces0 = ((0, 1), (1, 2), (2, 0))
# The (left elements, right elements) adjacent to each face
elements0 = ((0, 0, 0), (-1, -2, -1))
# Get the number of elements by the maximum index in elements0
num_elements0 = 1
# One boundary element neighboring the
# first element (index 0)
# of the second zone (index 1)
boundary_elems0 = ((0,),)
boundary_zones0 = ((1,),)
# Second Triangle
nodes1 = ((1, 0, 0.5), (1, 1, 1), (0, 1, 0.5))
scalar_data1 = (1, 3, 2)
faces1 = ((0, 1), (1, 2), (2, 0))
elements1 = ((0, 0, 0), (-1, -1, -2))
num_elements1 = 1
# One boundary element neighboring the
# first element (index 0)
# of the first zone (index 0)
boundary_elems1 = ((0,),)
boundary_zones1 = ((0,),)
# Create the dataset and zones
# Make sure to set the connectivity before any plot or style change.
ds = tp.active_frame().create_dataset('Data', ['x','y','z','s'])
z0 = ds.add_poly_zone(ZoneType.FEPolygon,
                      name='0: FE Polygon Float (3,1,3) Nodal',
                      num_points=len(nodes0),
                      num_elements=num_elements0,
                      num_faces=len(faces0))
z1 = ds.add_poly_zone(ZoneType.FEPolygon,
                      name='1: FE Polygon Float (3,1,3) Nodal',
                      num_points=len(nodes1),
                      num_elements=num_elements1,
                      num_faces=len(faces1))
# Fill in and connect first triangle
z0.values('x')[:] = [n[0] for n in nodes0]
z0.values('y')[:] = [n[1] for n in nodes0]
z0.values('z')[:] = [n[2] for n in nodes0]
z0.values('s')[:] = scalar_data0
# Fill in and connect second triangle
z1.values('x')[:] = [n[0] for n in nodes1]
z1.values('y')[:] = [n[1] for n in nodes1]
z1.values('z')[:] = [n[2] for n in nodes1]
z1.values('s')[:] = scalar_data1
# Set face neighbors
# (this stitches the two zones together through the boundary arrays above)
z0.facemap.set_mapping(faces0, elements0, boundary_elems0, boundary_zones0)
z1.facemap.set_mapping(faces1, elements1, boundary_elems1, boundary_zones1)
# Write data out in tecplot text format
tp.data.save_tecplot_ascii('polygons2.dat')
### Now we setup a nice view of the data
plot = tp.active_frame().plot(PlotType.Cartesian3D)
plot.activate()
plot.contour(0).colormap_name = 'Sequential - Yellow/Green/Blue'
plot.contour(0).colormap_filter.distribution = ColorMapDistribution.Continuous
for ax in plot.axes:
    ax.show = True
plot.show_mesh = False
plot.show_contour = True
plot.show_edge = True
plot.use_translucency = True
fmaps = plot.fieldmaps()
fmaps.surfaces.surfaces_to_plot = SurfacesToPlot.All
fmaps.effects.surface_translucency = 40
# View parameters obtained interactively from Tecplot 360
plot.view.distance = 10
plot.view.width = 2
plot.view.psi = 80
plot.view.theta = 30
plot.view.alpha = 0
plot.view.position = (-4.2, -8.0, 2.3)
# Showing mesh, we can see all the individual triangles
plot.show_mesh = True
fmaps.mesh.line_pattern = LinePattern.Dashed
# ensure consistent output between interactive (connected) and batch
plot.contour(0).levels.reset_to_nice()
tp.export.save_png('polygons2.png', 600, supersample=3)
| [
"55457608+brandonmarkham@users.noreply.github.com"
] | 55457608+brandonmarkham@users.noreply.github.com |
be9b797e8c84eb118ee2d5bd847543beea765636 | 548f9594d6634b4f814d8ee3fa9ea6fb8c612bda | /examples/simple/config/config_local.py | 51dcfb603619572c6ab622b9334849a25034ad28 | [] | no_license | wp-fei/algorithm-base | 994a10a94c11a2ccc0e076fc76d5d12612832349 | a54d8a192e364b02514cf1119761d0cb41790d9b | refs/heads/master | 2023-08-19T06:36:59.142576 | 2021-10-14T03:48:31 | 2021-10-14T03:48:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | """
本地环境配置
"""
# Presumably toggles per-call rate limiting -- confirm against framework usage.
enable_calllimit = False
# Port the HTTP service listens on.
PORT = 8000
# Number of gunicorn worker processes.
workers = 2
# max_requests = 2
# max_requests_jitter = 1
# preload_app = False
# keepalive = 2
# Worker timeout, in seconds.
timeout = 100
# Fixed log location -- do not change the log directory.
# accesslog='logs/access.log'
# errorlog='logs/error.log'
# accesslog='logs/faccess.log'
# errorlog='logs/ferror.log'
APP_NAME = 'simple'
# REGISTER_AT_EUREKA = True
# EUREKA_SERVER = "http://127.0.0.1:7001/eureka/"
preload_app = True
# Whether to enable the liveness probe.
ENABLE_LIVENESS_PROB = False
LIVENESS_PROB = {
    # Seconds to wait after container start before the liveness/readiness
    # probes are initialized; minimum value is 0.
    "initialDelaySeconds": 2,
    # Interval between probes, in seconds; minimum value is 1.
    "periodSeconds": 5,
    # Seconds after which the probe times out; minimum value is 1.
    "timeoutSeconds": 1,
    # Restart the container after this many consecutive failures.
    "failureThreshold": 3,
}
# REDIS = {
# 'host': 'localhost',
# 'port': 6379,
# }
| [
"pengxu.hpx@alibaba-inc.com"
] | pengxu.hpx@alibaba-inc.com |
335ef4cdde8d85825c1c527334f631a489ffa8db | 00b762e37ecef30ed04698033f719f04be9c5545 | /scripts/test_results/pipenv_test_results/conflicts/1_test_project_expected.py | d299260e39f9888bddc99b106636343d37d0a55c | [] | no_license | kenji-nicholson/smerge | 4f9af17e2e516333b041727b77b8330e3255b7c2 | 3da9ebfdee02f9b4c882af1f26fe2e15d037271b | refs/heads/master | 2020-07-22T02:32:03.579003 | 2018-06-08T00:40:53 | 2018-06-08T00:40:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,834 | py | # -*- coding=utf-8 -*-
import io
import pytest
import os
from pipenv.project import Project
from pipenv.utils import temp_environ
from pipenv.patched import pipfile
@pytest.mark.project
@pytest.mark.sources
@pytest.mark.environ
def test_pipfile_envvar_expansion(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with temp_environ():
with open(p.pipfile_path, 'w') as f:
f.write("""
[[source]]
url = 'https://${TEST_HOST}/simple'
verify_ssl = false
name = "pypi"
[packages]
pytz = "*"
""".strip())
os.environ['TEST_HOST'] = 'localhost:5000'
project = Project()
assert project.sources[0]['url'] == 'https://localhost:5000/simple'
assert 'localhost:5000' not in str(pipfile.load(p.pipfile_path))
@pytest.mark.project
@pytest.mark.sources
@pytest.mark.parametrize('lock_first', [True, False])
def test_get_source(PipenvInstance, pypi, lock_first):
with PipenvInstance(pypi=pypi, chdir=True) as p:
with open(p.pipfile_path, 'w') as f:
contents = """
[[source]]
url = "{0}"
verify_ssl = false
name = "testindex"
[[source]]
url = "https://pypi.python.org/simple"
verify_ssl = "true"
name = "pypi"
[packages]
pytz = "*"
six = {{version = "*", index = "pypi"}}
[dev-packages]
""".format(os.environ['PIPENV_TEST_INDEX']).strip()
f.write(contents)
if lock_first:
# force source to be cached
c = p.pipenv('lock')
assert c.return_code == 0
project = Project()
sources = [
['pypi', 'https://pypi.python.org/simple'],
['testindex', os.environ.get('PIPENV_TEST_INDEX')]
]
for src in sources:
name, url = src
source = [s for s in project.pipfile_sources if s.get('name') == name]
assert source
source = source[0]
assert source['name'] == name
assert source['url'] == url
assert sorted(source.items()) == sorted(project.get_source(name=name).items())
assert sorted(source.items()) == sorted(project.get_source(url=url).items())
assert sorted(source.items()) == sorted(project.find_source(name).items())
assert sorted(source.items()) == sorted(project.find_source(url).items())
@pytest.mark.install
@pytest.mark.project
@pytest.mark.parametrize('newlines', [u'\n', u'\r\n'])
def test_maintain_file_line_endings(PipenvInstance, pypi, newlines):
with PipenvInstance(pypi=pypi, chdir=True) as p:
# Initial pipfile + lockfile generation
c = p.pipenv('install pytz')
assert c.return_code == 0
# Rewrite each file with parameterized newlines
for fn in [p.pipfile_path, p.lockfile_path]:
with io.open(fn) as f:
contents = f.read()
written_newlines = f.newlines
assert written_newlines == u'\n', '{0!r} != {1!r} for {2}'.format(
written_newlines, u'\n', fn,
)
# message because of https://github.com/pytest-dev/pytest/issues/3443
with io.open(fn, 'w', newline=newlines) as f:
f.write(contents)
# Run pipenv install to programatically rewrite
c = p.pipenv('install chardet')
assert c.return_code == 0
# Make sure we kept the right newlines
for fn in [p.pipfile_path, p.lockfile_path]:
with io.open(fn) as f:
f.read() # Consumes the content to detect newlines.
actual_newlines = f.newlines
assert actual_newlines == newlines, '{0!r} != {1!r} for {2}'.format(
actual_newlines, newlines, fn,
)
# message because of https://github.com/pytest-dev/pytest/issues/3443
| [
"srhee4@cs.washington.edu"
] | srhee4@cs.washington.edu |
bb4982fbff64c4005c1bd748e6ca93e826ddc357 | c04acaa6ee9c6a7c365e217bc78039fa9c77833e | /my_apps/web/migrations/0002_auto_20160913_0025.py | d61f89d3bc2746ac0e88efdd2c9d5907f5c0832d | [] | no_license | danielhuamani/django-la-cuzquena | 0386800d640b224d94b0fac2d83f999b60d7da85 | a6f4aaf44775b27328d073a65f1d0f50eff51fad | refs/heads/master | 2020-12-05T04:51:01.077860 | 2016-09-17T13:56:58 | 2016-09-17T13:56:58 | 67,900,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,232 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-13 05:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import filebrowser.fields
class Migration(migrations.Migration):
dependencies = [
('web', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Contacto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=120, verbose_name='Nombre')),
('telefono', models.CharField(max_length=20, verbose_name='Telefono')),
('Correo', models.EmailField(max_length=254, verbose_name='Correo')),
('mensaje', models.TextField(verbose_name='Mensaje')),
],
options={
'verbose_name': 'Contacto',
'verbose_name_plural': 'Contacto',
},
),
migrations.CreateModel(
name='MovilizarEmpresa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=120, verbose_name='Nombre')),
('Correo', models.EmailField(max_length=254, verbose_name='Correo')),
('mensaje', models.TextField(verbose_name='Mensaje')),
],
options={
'verbose_name': 'Movilizar a tu Empresa',
'verbose_name_plural': 'Movilizar a tu Empresas',
},
),
migrations.CreateModel(
name='Nosotros',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nosotros_amarillo', models.TextField(verbose_name='Nosotros texto amarillo')),
('nosotros_plomo', models.TextField(verbose_name='Nosotros texto plomo')),
('banner', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Banner')),
('mision', models.TextField(verbose_name='Misi\xf3n')),
('vision', models.TextField(verbose_name='Visi\xf3n')),
],
options={
'verbose_name': 'Nosotros',
'verbose_name_plural': 'Nosotross',
},
),
migrations.CreateModel(
name='NuestrosServicios',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('posicion', models.IntegerField(default=0, verbose_name='Posicion')),
('imagen', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Servicios')),
('titulo', models.CharField(max_length=120, verbose_name='Titulo')),
('descripcion', models.TextField(verbose_name='Descripci\xf3n')),
],
options={
'verbose_name': 'Valores',
'verbose_name_plural': 'Valoress',
},
),
migrations.CreateModel(
name='Servicios',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('banner', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Servicios Banner')),
],
options={
'verbose_name': 'Nosotros',
'verbose_name_plural': 'Nosotross',
},
),
migrations.CreateModel(
name='Valores',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('posicion', models.IntegerField(default=0, verbose_name='Posicion')),
('imagen', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Valores')),
('titulo', models.CharField(max_length=120, verbose_name='Titulo')),
('descripcion', models.TextField(verbose_name='Descripci\xf3n')),
('nosotros', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nosotros_valores', to='web.Nosotros')),
],
options={
'verbose_name': 'Valores',
'verbose_name_plural': 'Valoress',
},
),
migrations.CreateModel(
name='Vehiculos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('posicion', models.IntegerField(default=0, verbose_name='Posicion')),
('vehiculo', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Vehiculos Imagen')),
('descripcion', models.TextField(verbose_name='Descripci\xf3n')),
],
options={
'verbose_name': 'Vehiculos',
'verbose_name_plural': 'Vehiculoss',
},
),
migrations.AlterField(
model_name='home',
name='nosotros_image',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Nosostros Imagen'),
),
migrations.AlterField(
model_name='home',
name='servicios_image',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Servicios Imagen'),
),
migrations.AlterField(
model_name='home',
name='vehiculos_image',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Vehiculos Imagen'),
),
migrations.AlterField(
model_name='homebanner',
name='banner',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Home Banner'),
),
migrations.AddField(
model_name='nuestrosservicios',
name='servicio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='servicios_valores', to='web.Servicios'),
),
]
| [
"danielhuamani15@gmail.com"
] | danielhuamani15@gmail.com |
d56b4958a050b2475eab08b22b64f06f36724f1c | c0973d6939ef419ed3d261d95167d537499a553a | /tests/test_forex_strategy.py | 34ee96c1d1ee8c48e63da8ba1979bb1dabdcf0dc | [
"MIT"
] | permissive | mj3428/OnePy | 0c6e4be9b4bb36ae66b566dfa85cd44bae2a07de | 8dc13fc21502daa5786aecaa4451ccba32fc8a14 | refs/heads/master | 2020-04-05T10:28:33.550915 | 2018-11-08T04:07:05 | 2018-11-08T04:07:05 | 134,518,682 | 0 | 0 | MIT | 2018-05-23T05:38:12 | 2018-05-23T05:38:11 | null | UTF-8 | Python | false | false | 5,956 | py |
from collections import defaultdict
import OnePy as op
from OnePy.custom_module.cleaner_talib import Talib
class DemoTestStrategy(op.StrategyBase):
def __init__(self):
super().__init__()
self.params = dict(sma1=25,
sma2=9)
self.set_params(self.params)
def set_params(self, params: dict):
self.params = params
self.sma1 = Talib(ind='SMA', frequency='H1',
params=dict(timeperiod=params['sma1']),
buffer_day=20).calculate
self.sma2 = Talib(ind='SMA', frequency='D',
params=dict(timeperiod=params['sma2']),
buffer_day=20).calculate
def handle_bar(self):
for ticker in self.env.tickers:
sma1 = self.sma1(ticker)
sma2 = self.sma2(ticker)
if sma1 > sma2:
self.buy(1, ticker, takeprofit=10, stoploss=10)
self.buy(1, ticker, takeprofit_pct=0.01, trailingstop=10)
self.buy(1, ticker, price_pct=0.1, takeprofit_pct=0.01)
self.short(1, ticker, takeprofit=10, trailingstop_pct=0.03)
self.short(1, ticker, stoploss_pct=0.02)
else:
self.sell(1, ticker, price_pct=0.1)
self.sell(99, ticker)
self.cover(3, ticker, price_pct=0.02)
self.cancel_tst(ticker, 'long', takeprofit=True)
self.cancel_pending(ticker, 'long', above_price=0)
START, END = '2016-01-05', '2016-01-21'
FREQUENCY = 'M30'
TICKER_LIST = ['EUR_USD']
INITIAL_CASH = 2000
go = op.backtest.forex(TICKER_LIST, FREQUENCY,
INITIAL_CASH, START, END, 'oanda')
DemoTestStrategy()
# forward_analysis(go, START, END, 2, 3)
# go.forward_analysis.run(START, 3, 2, 5)
# go.show_today_signals()
# go.sunny()
# go.output.save_result('backtest_forex.pkl')
# go.output.summary2()
# go.output.analysis.trade_analysis()
# go.output.plot('EUR_USD')
# go.output.plot(TICKER_LIST, 'plotly')
# || 正在初始化OnePy
# || =============== OnePy初始化成功! ===============
# || 开始寻找OnePiece之旅~~~
# || cleaners警告,可能遇到周末导致无法next
# || cleaners警告,可能遇到周末导致无法next
# || cleaners警告,可能遇到周末导致无法next
# || cleaners警告,可能遇到周末导致无法next
# ||
# ||
# || +--------------------------------+
# || |Fromdate | 2016-01-05|
# || |Todate | 2016-01-21|
# || |Initial_Value | $2000.00|
# || |Final_Value | $1991.33|
# || |Total_Return | -0.433%|
# || |Max_Drawdown | 2.725%|
# || |Max_Duration | 14 days|
# || |Max_Drawdown_Date | 2016-01-20|
# || |Sharpe_Ratio | -1.37|
# || +--------------------------------+
# || +---------------------------------------+
# || |Start_date | 2016-01-05|
# || |End_date | 2016-01-21|
# || |Initial_balance | $2000.00|
# || |End_balance | $1991.33|
# || |Total_return | -0.43%|
# || |Total_net_pnl | -$8.67|
# || |Total_commission | $0.19|
# || |Total_trading_days | 15 days|
# || |Max_drawdown | 2.73%|
# || |Max_drawdown_date | 2016-01-20|
# || |Max_duration_in_drawdown | 14 days|
# || |Max_margin | $14.76|
# || |Max_win_holding_pnl | $13.26|
# || |Max_loss_holding_pnl | -$37.73|
# || |Sharpe_ratio | -1.37|
# || |Sortino_ratio | -1.96|
# || |Number_of_trades | 1264|
# || |Number_of_daily_trades | 84.27|
# || |Number_of_profit_days | 15 days|
# || |Number_of_loss_days | 0 days|
# || |Avg_daily_pnl | -$0.58|
# || |Avg_daily_commission | $0.01|
# || |Avg_daily_return | -0.03%|
# || |Avg_daily_std | -0.03%|
# || |Annual_compound_return | -7.52%|
# || |Annual_average_return | -7.82%|
# || |Annual_std | -0.48%|
# || |Annual_pnl | -$145.61|
# || +---------------------------------------+
# || All Trades Long Trades Short Trades
# || Total_number_of_trades 1264 632 632
# || Total_net_pnl -$7.84 -$11.54 $3.69
# || Ratio_avg_win_avg_loss 0.85 0.75 0.87
# || Profit_factor 0.69 0.29 1.43
# || Percent_profitable 45.02% 28.16% 61.87%
# || Number_of_winning_trades 569 178 391
# || Number_of_losing_trades 693 454 239
# || Max_holding_period 4.85 days 4.85 days 4.62 days
# || Max_consecutive_winning_trade 126 47 126
# || Max_consecutive_losing_trade 104 102 95
# || Largest_winning_trade $0.11 $0.11 $0.10
# || Largest_losing_trade -$0.12 -$0.10 -$0.12
# || Gross_profit $17.39 $4.75 $12.64
# || Gross_loss -$25.04 -$16.19 -$8.85
# || Gross_commission $0.19 $0.09 $0.09
# || Expectancy_adjusted_ratio -0.17 -0.51 0.16
# || Expectancy -$0.01 -$0.02 $0.01
# || Avg_winning_trade $0.03 $0.03 $0.03
# || Avg_net_pnl_per_trade -$0.01 -$0.02 $0.01
# || Avg_losing_trade -$0.04 -$0.04 -$0.04
# || Avg_holding_period 2.14 days 1.96 days 3.10 days
# || python tests/test_forex_strategy.py 4.84s user 0.35s system 92% cpu 5.622 total
# || [Finished in 5 seconds]
| [
"chenjiayicjy@126.com"
] | chenjiayicjy@126.com |
036fab2dffd9e95949f40381d15eede2b578ec55 | 000f57fa43ecf9f5353ca80ced3ad505698dbecb | /imagelib/images/admin.py | a94d0a8d37ef907f4ed79f5c646191a01efdafd6 | [] | no_license | kamal0072/imagegallary | 846e9ef43f6e0c42c98a4a4ad5cb22faef295936 | 91effde764710fd9bfc31b7dec238d143833e31e | refs/heads/master | 2023-05-02T07:32:35.805297 | 2021-05-21T05:35:05 | 2021-05-21T05:35:05 | 359,541,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.contrib import admin
from .models import MyImage
@admin.register(MyImage)
class MyImageAdmin(admin.ModelAdmin):
list_display=['id','first_name','photo','date']
| [
"hasan.kamaal0072@gmail.com"
] | hasan.kamaal0072@gmail.com |
2225a843e02ca5a21e862f98265377310449758d | cd90bbc775cbce9a7e0bc46cbb9437e3961e587f | /misc/advent/2017/23/b.py | 4d6d170eea60181e64e6b15187262a04afe567f4 | [] | no_license | llimllib/personal_code | 7b3f0483589e2928bf994184e3413f4b887e1f0c | 4d4662d53e0ac293dea8a4208ccca4a1f272e64a | refs/heads/master | 2023-09-05T04:02:05.075388 | 2023-09-01T12:34:09 | 2023-09-01T12:34:09 | 77,958 | 9 | 16 | null | 2023-08-16T13:54:39 | 2008-11-19T02:04:46 | HTML | UTF-8 | Python | false | false | 1,685 | py | from collections import defaultdict
import sys
def go(inp):
registers = defaultdict(int)
registers['a'] = 1
instructions = list(i.strip() for i in inp)
ptr = 0
i = 0
muls = 0
while ptr < len(instructions):
if i % 1000000 == 0:
pass
#print(registers)
inst, a, b = instructions[ptr].split(" ")
# print(ptr, inst, a, b)
if instructions[ptr] == "jnz g 2" and registers['d'] * registers['e'] == registers['b']:
print(registers)
if inst == "set":
try:
registers[a] = int(b)
except ValueError:
# this casee is not discussed in the documentation?
registers[a] = registers[b]
elif inst == "mul":
muls += 1
try:
registers[a] *= int(b)
except ValueError:
registers[a] *= registers[b]
elif inst == "sub":
try:
registers[a] -= int(b)
except ValueError:
registers[a] -= registers[b]
elif inst == "jnz":
try:
val = int(a)
except ValueError:
val = registers[a]
if val != 0:
try:
ptr += int(b)
continue
except ValueError:
# also not discussed in the docs
ptr += registers[b]
continue
else:
raise TypeError("ohno")
# print(registers)
ptr += 1
i += 1
print(registers['h'])
if __name__ == "__main__":
go(open('jimmied.txt'))
| [
"bill@billmill.org"
] | bill@billmill.org |
35971ac7e0caac70225bae1f0be790575d72bcc8 | 1be0090ac65ee3c4ad12c9152886169d15fd6d0d | /tests/test_shelter.py | 17ad185362119c38c433f2483fbbc094c649d1f4 | [] | no_license | jreiher2003/Puppy-Adoption | ec652a5df68fd428605fc154b2e6fb1bf0f5d752 | 0d0bea549d174d903db9de1ca5be79412f333ea3 | refs/heads/master | 2021-01-10T09:26:38.652807 | 2016-04-03T18:32:17 | 2016-04-03T18:32:17 | 51,519,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | import unittest
from base import BaseTestCase
from app.models import Shelter
class TestShelterCase(BaseTestCase):
def test_shelter_new_page(self):
response = self.client.get('/new-shelter', content_type='html/text')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Add a shelter', response.data)
def test_shelter_add_new(self):
response = self.client.post('/new-shelter', data=dict(name='Greatshelter', address="321 Notreal st.", city="Nocity", state="Alabama", zipCode=54321, website="http://www.notreal.com", maximum_capacity=6, current_capacity=2), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertIn(b"<strong>Congrats</strong> You just created a new shelter named <u>Greatshelter</u>.", response.data)
def test_shelter_add_new_error(self):
response = self.client.post('/new-shelter', data=dict(name='Greatshelter'),follow_redirects=True)
self.assertIn(b"This field is required.", response.data)
def test_shelter_database(self):
shelter = Shelter.query.filter_by(id=1).one()
self.assertEqual(shelter.id, 1)
self.assertEqual(shelter.name, 'Testshelter')
self.assertEqual(shelter.address, '123 Fake st.')
self.assertEqual(shelter.city, 'Fake')
self.assertEqual(shelter.state, 'Florida')
self.assertEqual(shelter.zipCode, '12345')
self.assertEqual(shelter.website, 'http://test.com')
self.assertEqual(shelter.maximum_capacity, 10)
self.assertEqual(shelter.current_capacity, 5)
def test_shelter_edit_page(self):
response = self.client.get('/1/testshelter/edit/', content_type='html/text')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Edit a shelter', response.data)
def test_shelter_edit_post(self):
response = self.client.post('/1/testshelter/edit/', data=dict(name="Testshelter", address="123 Fake st.", city="Fake", state="Florida", zipCode="12345", website="http://test.com", maximum_capacity=10, current_capacity=6), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertIn(b'<strong>Update</strong> on <u>Testshelter</u>.', response.data)
def test_shelter_delete_page(self):
response = self.client.get('/1/testshelter/delete/', content_type='html/text')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Are you sure you want to close down <mark>Testshelter</mark>?', response.data)
def test_shelter_delete_post(self):
response = self.client.post('/1/testshelter/delete/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertIn(b'<strong>Successfully</strong> deleted shelter <u>Testshelter</u>.', response.data)
| [
"jreiher2003@yahoo.com"
] | jreiher2003@yahoo.com |
c5cf16f641ee307011a7892379838fa61d48d9d0 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/55af9580d6a61621d0d6de6e2bbfd43d14841968ee1dd01113aeb5bd2473a4cc/pyexpat/model.py | e5b10af34a44317d9c92488636663935e2daa366 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # encoding: utf-8
# module pyexpat.model
# from C:\Users\Doly\Anaconda3\lib\site-packages\tables\indexesextension.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to interpret content model information. """
# no imports
# Variables with simple values
XML_CQUANT_NONE = 0
XML_CQUANT_OPT = 1
XML_CQUANT_PLUS = 3
XML_CQUANT_REP = 2
XML_CTYPE_ANY = 2
XML_CTYPE_CHOICE = 5
XML_CTYPE_EMPTY = 1
XML_CTYPE_MIXED = 3
XML_CTYPE_NAME = 4
XML_CTYPE_SEQ = 6
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
acd76f51af16046ea805a215a3013626165e5c91 | e440cbf65b3b3a390d851df876ecb859ddaf2c5b | /marl/environments/particles/multiagent/scenarios/simple_spread.py | df3cdec30af0fe28579adbe711aea76c550598db | [
"MIT"
] | permissive | wangjie-ubuntu/badger-2019 | 9ead42f9f4d4ebf2a74f02cf0a53de88cbe96a43 | ccd8f428ad8aafad24f16d8e36ea31f6ab403dda | refs/heads/master | 2022-08-12T07:49:36.928571 | 2020-01-07T16:04:33 | 2020-01-07T16:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,012 | py | import numpy as np
from marl.environments.particles.multiagent.core import World, Agent, Landmark
from marl.environments.particles.multiagent.scenarios.custom.configurable_scenario import ConfigurableScenario
class Scenario(ConfigurableScenario):
""" Cooperative Navigation task in the https://arxiv.org/pdf/1706.02275.pdf
3 agents, 3 landmarks, each agent should sit on one landmark, implicit communication.
"""
num_landmarks: int
def __init__(self):
# parameters overriden by the setup
super().__init__(num_agents=3)
self.num_landmarks = 3
def setup(self,
num_agents: int,
num_landmarks: int,
rollout_length: int):
super().setup()
self.num_agents = num_agents
self.num_landmarks = num_landmarks
self.episode_length = rollout_length
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
# num_agents = 3
# num_landmarks = 3
world.collaborative = True
# add emergent_comm
world.agents = [Agent() for i in range(self.num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.15
# add landmarks
world.landmarks = [Landmark() for i in range(self.num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
super().reset_world(world)
# random properties for emergent_comm
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.35, 0.85])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
min_dists += min(dists)
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
rew = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
rew -= min(dists)
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
return rew
def observation(self, agent, world):
""" Composes the observation in the following manner, for each agent it is:
[vel_x, vel_y, pos_x, pos_y, [landmark positions], [other agent positions], comm. from others]
Communication not used now.
"""
super().observation(agent, world)
# get positions of all landmarks in this agent's reference frame
entity_pos = []
for entity in world.landmarks: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# not used
# landmark colors
entity_color = []
for entity in world.landmarks: # world.entities:
entity_color.append(entity.color)
# communication and position of all other emergent_comm
comm = []
other_pos = []
for other in world.agents:
if other is agent: continue
comm.append(other.state.c)
other_pos.append(other.state.p_pos - agent.state.p_pos)
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + comm)
| [
"jaroslav.vitku@goodai.com"
] | jaroslav.vitku@goodai.com |
43b7e368be72ded30b0f5741ec6aaeae9d297fc1 | e535f59053b545b493c93c9945aa054ad1335178 | /tests/test_scripts/test_gen_namespace.py | eedf7912de2a7e4ff6f8882cfc227d6842658f13 | [
"CC0-1.0"
] | permissive | pabloalarconm/linkml | 9308669d5baba2a2c60fe79f31f737e87ed59295 | 5ef4b2f0e89698ffc0db693fdba68d1306438749 | refs/heads/main | 2023-08-25T14:41:58.419628 | 2021-10-02T02:04:06 | 2021-10-02T02:04:06 | 411,990,387 | 0 | 0 | CC0-1.0 | 2021-09-30T08:49:50 | 2021-09-30T08:49:50 | null | UTF-8 | Python | false | false | 1,107 | py | import unittest
from types import ModuleType
import click
from linkml.generators import namespacegen
from tests.test_scripts.environment import env
from tests.utils.clicktestcase import ClickTestCase
from tests.utils.filters import metadata_filter
from tests.utils.python_comparator import compare_python
class GenNamespaceTestCase(ClickTestCase):
testdir = "gennamespace"
click_ep = namespacegen.cli
prog_name = "gen-namespace"
env = env
def test_help(self):
self.do_test("--help", 'help')
def test_meta(self):
self.maxDiff = None
self.do_test([], 'meta_namespaces.py', filtr=metadata_filter,
comparator=lambda exp, act: compare_python(exp, act, self.env.expected_path('meta_namespaces.py')))
self.do_test('-f py', 'meta_namespaces.py', filtr=metadata_filter,
comparator=lambda exp, act: compare_python(exp, act, self.env.expected_path('meta_namespaces.py')))
self.do_test('-f xsv', 'meta_error', expected_error=click.exceptions.BadParameter)
if __name__ == '__main__':
unittest.main()
| [
"solbrig@jhu.edu"
] | solbrig@jhu.edu |
1440f3b31a140e37da83c49b68ae17d0efb1a5a3 | 38374bd02b3d88f26e3419fd94cebf292fa8460a | /jecta.py | 175470b3eb92aea6391e5733f7f49d8026c0433d | [] | no_license | thisismyrobot/jecta | fa6a78624a7f33f9804f297e377eb5ab9c84f85d | 53339112229b35f24df1d30abac695e904276c12 | refs/heads/master | 2020-05-18T18:19:40.011909 | 2010-01-20T03:45:31 | 2010-01-20T03:45:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | py | import gtk
import signals
import widgets
import gobject
import database
class Jecta(object):
""" The Jecta application.
"""
def __init__(self):
#create signals
gobject.type_register(signals.Sender)
gobject.signal_new("jecta_data_received",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,))
gobject.signal_new("jecta_tag_and_data_received",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_STRING))
gobject.signal_new("jecta_get_tag_for_data",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,))
gobject.signal_new("jecta_add_to_db",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_STRING))
gobject.signal_new("jecta_dropper_clicked",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
())
gobject.signal_new("jecta_get_search_tag",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
())
gobject.signal_new("jecta_search_string_updated",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,))
gobject.signal_new("jecta_search_db",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,))
gobject.signal_new("jecta_search_results_ready",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))
gobject.signal_new("jecta_display_search_results",
signals.Sender,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))
#create the signal sender
sender = signals.Sender()
#create the controller
signals.Controller(sender)
#create windows, provide signal sender
drop_target = widgets.Dropper(sender)
widgets.Tagger(sender)
widgets.Searcher(sender)
database.Database(sender)
#show drop target
drop_target.show()
gtk.main()
if __name__ == "__main__":
Jecta()
| [
"rwallhead@gmail.com"
] | rwallhead@gmail.com |
fd2c3d282da1505204c3bfa937bae13dff575513 | cb69392e87f4faa67adb45b8f7937d834922dc60 | /rl_utils/actor_critic.py | 45ffb2478732af03108fa9b9cb5a871c1d06f36d | [] | no_license | maxme1/rl | 76c65a363f9d735af70ede4d3bf5b39e61a5ab41 | d0506d96f30884259b88222a00b60f56d2b8e7a3 | refs/heads/master | 2021-01-24T13:28:26.617952 | 2020-05-04T13:13:58 | 2020-05-04T13:13:58 | 123,175,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | import torch
import torch.nn.functional as functional
from torch.autograd import Variable
from torch import nn
from rl_utils.interfaces.base import to_var
from rl_utils.utils import View
def calculate_loss(agent, memory, prepare_batch, gamma, entropy_weight, value_weight):
states, actions, rewards, done = prepare_batch(memory)
# TODO: add func for all this:
b, e = to_var(states[::2]), to_var(states[1::2], volatile=True)
actions = to_var(actions)
rewards = to_var(rewards).float()
done = torch.ByteTensor(done).cuda()
prob_logits, value = agent(b)
prob = functional.softmax(prob_logits, -1)
log_prob = functional.log_softmax(prob_logits, -1)
entropy = -(log_prob * prob).sum(1, keepdim=True)
log_prob = log_prob.gather(1, actions)
final_values = agent(e)[1]
final_values[done] = 0
final_values.volatile = False
cumulative_reward = final_values * gamma + rewards
value_loss = functional.mse_loss(value, cumulative_reward)
delta_t = cumulative_reward.data - value.data
policy_loss = - log_prob * Variable(delta_t) - entropy_weight * entropy
return policy_loss.mean() + value_weight * value_loss
def get_action(predict: Variable):
# predict == (prob_logits, value)
return functional.softmax(predict[0], -1).multinomial().data.cpu()[0]
class ActorCritic(nn.Module):
def __init__(self, input_channels, n_actions, n_features=3136):
super().__init__()
self.main_path = nn.Sequential(
nn.Conv2d(input_channels, 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
View(n_features),
nn.Linear(n_features, 512),
nn.ReLU(),
)
self.probs = nn.Linear(512, n_actions)
self.value = nn.Linear(512, 1)
def forward(self, x):
x = self.main_path(x)
return self.probs(x), self.value(x)
| [
"maxs987@gmail.com"
] | maxs987@gmail.com |
a1ef391297dbf1e19b7dd5ada859142e7a247bfb | 2c38c2ea0328b75ba96a36346f71bd8ddeda3d35 | /qa/cancel_direct_offline.py | 93b77931436d654efd4d1274206fca4b23f65979 | [
"MIT"
] | permissive | TheButterZone/openbazaar-go | c6b76e6b7d4cb608f09c6f4dd5d62b97d5b1758d | afa185e7a929eb4ee659c53859a73b1dd53b3ae0 | refs/heads/master | 2021-06-27T06:24:54.645852 | 2017-09-09T03:21:30 | 2017-09-09T03:21:30 | 102,985,074 | 1 | 1 | null | 2017-09-09T21:08:08 | 2017-09-09T21:08:08 | null | UTF-8 | Python | false | false | 7,643 | py | import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class CancelDirectOfflineTest(OpenBazaarTestFramework):
    """End-to-end regression test for cancelling a direct-payment order
    placed against an offline vendor.

    Flow: fund Bob's wallet, Alice posts a listing, Alice goes offline,
    Bob purchases and pays, Bob cancels, Alice comes back online; both
    peers must converge on state CANCELED and the payment must be
    refunded to Bob's wallet.

    Fixes over the original: failure messages now actually interpolate
    the server-supplied reason (`%` instead of passing it as a second
    exception argument), and two copy-pasted messages were corrected.
    """

    def __init__(self):
        super().__init__()
        self.num_nodes = 3

    def run_test(self):
        alice = self.nodes[0]
        bob = self.nodes[1]

        # generate some coins and send them to bob
        time.sleep(4)
        api_url = bob["gateway_url"] + "wallet/address"
        r = requests.get(api_url)
        if r.status_code == 200:
            resp = json.loads(r.text)
            address = resp["address"]
        elif r.status_code == 404:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Address endpoint not found")
        else:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Unknown response")
        self.send_bitcoin_cmd("sendtoaddress", address, 10)
        time.sleep(20)

        # post listing to alice
        with open('testdata/listing.json') as listing_file:
            listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
        api_url = alice["gateway_url"] + "ob/listing"
        r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Listing post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CancelDirectOfflineTest - FAIL: Listing POST failed. Reason: %s" % resp["reason"])
        time.sleep(4)

        # get listing hash
        api_url = alice["gateway_url"] + "ipns/" + alice["peerId"] + "/listings.json"
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Couldn't get listing index")
        resp = json.loads(r.text)
        listingId = resp[0]["hash"]

        # bob fetch listing to cache
        api_url = bob["gateway_url"] + "ipfs/" + listingId
        requests.get(api_url)

        # shutdown alice so the purchase goes through the offline flow
        api_url = alice["gateway_url"] + "ob/shutdown"
        requests.post(api_url, data="")
        time.sleep(4)

        # bob send order
        with open('testdata/order_direct.json') as order_file:
            order_json = json.load(order_file, object_pairs_hook=OrderedDict)
        order_json["items"][0]["listingHash"] = listingId
        api_url = bob["gateway_url"] + "ob/purchase"
        r = requests.post(api_url, data=json.dumps(order_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Purchase post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CancelDirectOfflineTest - FAIL: Purchase POST failed. Reason: %s" % resp["reason"])
        resp = json.loads(r.text)
        orderId = resp["orderId"]
        payment_address = resp["paymentAddress"]
        payment_amount = resp["amount"]
        if resp["vendorOnline"] == True:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Purchase returned vendor is online")

        # check the purchase saved correctly
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_PAYMENT":
            raise TestFailure("CancelDirectOfflineTest - FAIL: Bob purchase saved in incorrect state")
        if resp["funded"] == True:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Bob incorrectly saved as funded")

        # fund order
        spend = {
            "address": payment_address,
            "amount": payment_amount,
            "feeLevel": "NORMAL"
        }
        api_url = bob["gateway_url"] + "wallet/spend"
        r = requests.post(api_url, data=json.dumps(spend, indent=4))
        if r.status_code == 404:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Spend post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            # was "Purchase POST failed" — copy-paste from the purchase block
            raise TestFailure("CancelDirectOfflineTest - FAIL: Spend POST failed. Reason: %s" % resp["reason"])
        time.sleep(20)

        # check bob detected payment
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if len(resp["paymentAddressTransactions"]) <= 0:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Bob failed to detect his payment")
        if resp["funded"] == False:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Bob incorrectly saved as unfunded")
        if resp["state"] != "PENDING":
            raise TestFailure("CancelDirectOfflineTest - FAIL: Bob purchase saved in incorrect state")

        # bob cancel order
        api_url = bob["gateway_url"] + "ob/ordercancel"
        cancel = {"orderId": orderId}
        r = requests.post(api_url, data=json.dumps(cancel, indent=4))
        if r.status_code == 404:
            # was "Spend post endpoint not found" — copy-paste from the spend block
            raise TestFailure("CancelDirectOfflineTest - FAIL: Cancel post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("CancelDirectOfflineTest - FAIL: Cancel POST failed. Reason: %s" % resp["reason"])
        time.sleep(4)

        # bob check order canceled correctly
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "CANCELED":
            raise TestFailure("CancelDirectOfflineTest - FAIL: Bob failed to save as canceled")
        if "refundAddressTransaction" not in resp or resp["refundAddressTransaction"] == {}:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Bob failed to detect outgoing payment")

        # startup alice again
        self.start_node(alice)
        self.send_bitcoin_cmd("generate", 1)
        time.sleep(45)

        # check alice detected order
        api_url = alice["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Couldn't load order from Alice %s" % r.status_code)
        resp = json.loads(r.text)
        if resp["state"] != "CANCELED":
            raise TestFailure("CancelDirectOfflineTest - FAIL: Alice failed to detect order cancellation")

        # Check the funds moved into bob's wallet
        api_url = bob["gateway_url"] + "wallet/balance"
        r = requests.get(api_url)
        if r.status_code == 200:
            resp = json.loads(r.text)
            confirmed = int(resp["confirmed"])
            #unconfirmed = int(resp["unconfirmed"])
            if confirmed <= 50 - payment_amount:
                raise TestFailure("CancelDirectOfflineTest - FAIL: Bob failed to receive the multisig payout")
        else:
            raise TestFailure("CancelDirectOfflineTest - FAIL: Failed to query Bob's balance")

        print("CancelDirectOfflineTest - PASS")
if __name__ == '__main__':
    # Run this regression test standalone against a regtest network.
    print("Running CancelDirectOfflineTest")
    cli_args = ["--regtest", "--disableexchangerates"]
    CancelDirectOfflineTest().main(cli_args)
| [
"chris@ob1.io"
] | chris@ob1.io |
20743807dd344294a34fe1864a3dfc8fd8b498f9 | 745e6021a466797a04435cc4060836c955985b89 | /apps/tickets/models.py | 088b985aef25e7db82c4887619e2575e91c95e2b | [] | no_license | anykate/ticketapi | f32cf5ed31065afab76a5765f583115f7e76eca5 | de816dea453c4fc5b8fc04f9296c9e0ab91749aa | refs/heads/master | 2020-07-03T03:00:15.230698 | 2019-08-11T16:31:34 | 2019-08-11T16:31:34 | 201,763,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | from django.db import models
import uuid
from django.contrib.auth.models import User
from django.utils.text import slugify
# Create your models here.
def generate_ticket_id():
    """Return a short unique ticket id: the last segment of a UUID4."""
    return str(uuid.uuid4()).rsplit("-", 1)[-1]
class Category(models.Model):
    """A named ticket category with an auto-generated, immutable slug."""
    # Human-readable name; the slug below is derived from it on first save.
    name = models.CharField(max_length=200)
    # Not editable in forms/admin; uniqueness is guaranteed by
    # _get_unique_slug() before saving.
    slug = models.SlugField(
        editable=False,
        unique=True,
        max_length=255
    )
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name

    def _get_unique_slug(self):
        """Slugify the name, appending -1, -2, ... until the slug is unused."""
        slug = slugify(self.name)
        unique_slug = slug
        num = 1
        while Category.objects.filter(slug=unique_slug).exists():
            unique_slug = f'{slug}-{num}'
            num += 1
        return unique_slug

    def save(self, *args, **kwargs):
        # Assign the slug only once, on the first save (slug is not editable,
        # so renaming the category later does not change existing URLs).
        if not self.slug:
            self.slug = self._get_unique_slug()
        super(Category, self).save(*args, **kwargs)

    class Meta:
        verbose_name_plural = 'categories'
        ordering = ["-created"]
class Ticket(models.Model):
    """A user-filed support ticket with a short public identifier."""
    title = models.CharField(max_length=255)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField()
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    # Short public id; filled in on first save by generate_ticket_id().
    ticket_id = models.CharField(max_length=255, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f"{self.title} - {self.ticket_id}"

    def save(self, *args, **kwargs):
        # Generate an id only when the field is empty or spaces-only.
        # NOTE: strip(" ") removes spaces but not tabs/newlines — presumably
        # intentional for form input; confirm if other whitespace can occur.
        if len(self.ticket_id.strip(" ")) == 0:
            self.ticket_id = generate_ticket_id()
        # Call the real save() method
        super(Ticket, self).save(*args, **kwargs)

    class Meta:
        ordering = ["-created"]
| [
"aryamane.aniket@gmail.com"
] | aryamane.aniket@gmail.com |
be5c5e9e4c35cc10ddd8cbc5b8d36c8c0df48973 | 9680ba23fd13b4bc0fc3ce0c9f02bb88c6da73e4 | /Brian Heinold (243) ile Python/p10611b_sınav.py | a7c32a2bf1b3f9310b20a51945735eec99e834d5 | [] | no_license | mnihatyavas/Python-uygulamalar | 694091545a24f50a40a2ef63a3d96354a57c8859 | 688e0dbde24b5605e045c8ec2a9c772ab5f0f244 | refs/heads/master | 2020-08-23T19:12:42.897039 | 2020-04-24T22:45:22 | 2020-04-24T22:45:22 | 216,670,169 | 0 | 0 | null | null | null | null | ISO-8859-9 | Python | false | false | 759 | py | # coding:iso-8859-9 Türkçe
# Fragments of a Turkish promotional credit-card letter; they are stitched
# together around the reader's full name and first name below.
teklif1 = "Sayın "
teklif2 = "\n\nSize yeni Platin Artı İkramiyeli kartımızı %47.99 gibi\nçok özel bir tanıtım indirimiyle sunmaktan gurur duyuyorum.\n"
teklif3 = ", böyle bir teklif kimseye her gün pek sık yapılmaz;\nbu yüzden +90-800-314-1592 ücretsiz numaramızı hemen\naramanızı şiddetle tavsiye ediyorum.\nBöylesi indirimli tanıtım kampanya indirimini çok uzun süre devam\nettiremeyiz, "
teklif4 = ", bu yüzden hiç vakit yitirmeden\nhemen bizi aramalısınız!.."
# Prompt for the reader's full name; skip the letter entirely on empty input.
giriş = input ("Açık ad soyadınızı giriniz: ")
if len(giriş) > 0:
    # First name = text before the first space; the whole input if no space.
    try: ad = giriş[:giriş.index(' ')]
    except ValueError: ad = giriş
    print (teklif1, giriş, teklif2, ad, teklif3, ad, teklif4, sep="")
| [
"noreply@github.com"
] | mnihatyavas.noreply@github.com |
7e7b6b0a507848ab4173ff7b66c5f17459c3d342 | 8130c34d546c323d6d5d2ca6b4a67330af08828f | /.history/menu_app/views_20210104161734.py | 1036941bea0c7c7320204735b5813ff6004c6e42 | [] | no_license | lienusrob/final | ba2dad086fc97b21b537ef12df834dfadd222943 | f2726e31f1d51450e4aed8c74021c33679957b28 | refs/heads/master | 2023-02-15T01:36:54.463034 | 2021-01-07T12:47:05 | 2021-01-07T12:47:05 | 327,279,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,812 | py |
from .models import Cart, CartItem, MenuItem, ItemsCategory, Order, generate_order_id
from account_app.models import Profile
from .forms import AddToCartForm
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
class MenuListView(ListView):
    """Generic class-based list view over all menu items."""
    model = MenuItem
    template_name = 'items/menu_list.html'
def menu_list_view(request):
    """Render the full menu list plus category listings for both navs."""
    context = {
        'item_list': MenuItem.objects.all(),
        'item_categories': reversed(ItemsCategory.objects.all()),
        'item_categories_side_nav': reversed(ItemsCategory.objects.all()),
    }
    return render(request, 'menu_app/menu_list.html', context)
def home(request):
    """Render the homepage with the full category menu."""
    return render(request, 'homepage.html',
                  {'category_menu': ItemsCategory.objects.all()})
def menu_item_detail(request, **kwargs):
    """Show the detail page for one menu item, looked up by 'pk' kwarg.

    Uses .first(), so a missing id renders the template with item=None
    rather than raising.
    """
    menu_item = MenuItem.objects.filter(id=kwargs.get('pk')).first()
    return render(request, 'menu_app/item_details.html', {'item': menu_item})
def new_order_info(request):
    """Show the user's open (not-yet-ordered) order, creating it if needed.

    A freshly created order receives a generated reference code.
    """
    user_profile = get_object_or_404(Profile, user=request.user)
    order, created = Order.objects.get_or_create(customer=user_profile.user, is_ordered=False)
    if created:
        order.ref_code = generate_order_id()
        order.save()
    return render(request, 'items/order_info.html', {'order': order})
def cart (request):
    # NOTE(review): this definition is shadowed by the second `cart` view
    # defined later in this module, so it is effectively dead code — one of
    # the two should be removed.
    cart = Cart.objects.get(user = request.user, current = True)
    cart_items = CartItem.objects.filter(cart = cart)
    context = {'cart_items':cart_items}
    return render (request, 'menu_app/cart.html', context )
def menu_details(request, name):
    """List the items of one category; on POST, add the posted item to
    the user's current cart."""
    category = ItemsCategory.objects.get(name=name)
    menu_details = MenuItem.objects.filter(category=category)
    context = {'menu_details':menu_details, 'category':name, 'user':request.user}
    if request.method=="POST":
        form = AddToCartForm(request.POST or None)
        # NOTE(review): get_or_create returns a (cart, created) tuple, so
        # form.cart receives a tuple here — confirm AddToCartForm expects that.
        form.cart = Cart.objects.get_or_create(user=request.user, current=True)
        # NOTE(review): form.save() is called without form.is_valid() — verify.
        form.save()
        #messages.success(request, "Item" "added to cart successfully!, please go to cart and check for items.")
    return render(request, ('menu_app/menu_list.html'), context)
def cart(request):
    """Display the contents of the logged-in user's current cart."""
    current_cart = Cart.objects.get(user=request.user, current=True)
    items = CartItem.objects.filter(cart=current_cart)
    #extras = Extras.objects.all()
    return render(request, 'menu_app/cart.html', {'cart_items': items})
def view_cart(request):
    """Render the cart contents page."""
    return render(request, "cart.html")
def add_to_cart(request, id):
    """Add the POSTed quantity of product `id` to the session cart."""
    quantity = int(request.POST.get('quantity'))
    session_cart = request.session.get('cart', {})
    if id in session_cart:
        session_cart[id] = int(session_cart[id]) + quantity
    else:
        # id is absent in this branch, so .get() always yields `quantity`.
        session_cart[id] = session_cart.get(id, quantity)
    request.session['cart'] = session_cart
    return redirect('homepage')
def adjust_cart(request, id):
    """Set the quantity for item `id` in the session cart; non-positive
    quantities remove the item."""
    new_quantity = int(request.POST.get('quantity'))
    session_cart = request.session.get('cart', {})
    if new_quantity <= 0:
        session_cart.pop(id)
    else:
        session_cart[id] = new_quantity
    request.session['cart'] = session_cart
    return redirect('view_cart')
def orders (request):
cart = Cart.objects.get(user=request.user, current = True)
cart_items = CartItem.objects.filter(cart__pk__ = cart.pk)
if request.method == "POST":
for key, value in request.POST.items():
if key == "csrfmiddleweartoken":
continue
cart.current == False
cart.date_ordered= timezone.now()
cart.save()
order= orders (cart = cart)
| [
"lienus.rob@hotmail.de"
] | lienus.rob@hotmail.de |
91603cf08d37714b4e52f6dd5ab7176a319eee9d | 038131f491c44ff30e9f403cb46ff5e5c91a5528 | /amuse_util/data/__init__.py | b5a3ed9db49480c30fe8fd7fee51a4dee234e689 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nstarman/amuse_util | 3d94ec44f21d5bf0da8b97c50c90f180245fecc1 | 5086b5db10fe96e3d797a5ed59b76fefb41a61b1 | refs/heads/master | 2023-04-07T11:43:19.697900 | 2021-06-25T18:54:18 | 2021-06-25T18:54:18 | 239,362,836 | 2 | 1 | NOASSERTION | 2021-06-25T18:54:19 | 2020-02-09T19:39:31 | Python | UTF-8 | Python | false | false | 1,039 | py | # -*- coding: utf-8 -*-
# see LICENSE.rst
# ----------------------------------------------------------------------------
#
# TITLE : Data
# AUTHOR : Nathaniel Starkman
# PROJECT : amuse_util
#
# ----------------------------------------------------------------------------
"""Data Management.
Often data is packaged poorly and it can be difficult to understand how
the data should be read.
DON'T PANIC.
This module provides functions to read the contained data.
"""
__author__ = "Nathaniel Starkman"
# __credits__ = [""]
# __all__ = [
# ""
# ]
###############################################################################
# IMPORTS
###############################################################################
# CODE
###############################################################################
def function():
    """Docstring."""
    return None
# /def
# ------------------------------------------------------------------------
###############################################################################
# END
| [
"nstarkman@protonmail.com"
] | nstarkman@protonmail.com |
0a1ddca836a6ecb459147efae425ba4ceb743d4d | db68e4cf7ae7c9880aecdcee48c8b41aecc6eb65 | /torch/distributions/gamma.py | d9db0d8883e0f9463883409e8db28837eb9a4149 | [
"BSD-2-Clause"
] | permissive | mbp28/pytorch | 175c8e1821dd6e4fda9d1d3f9e2edbd604bf4150 | d450895a74b84672b02f3fbbaa7ccbdd9b6a3335 | refs/heads/master | 2021-01-25T09:08:33.799198 | 2017-12-14T17:31:58 | 2017-12-14T17:31:58 | 93,779,621 | 0 | 0 | null | 2017-06-08T18:26:15 | 2017-06-08T18:26:15 | null | UTF-8 | Python | false | false | 1,814 | py | from numbers import Number
import torch
from torch.autograd import Variable, Function
from torch.autograd.function import once_differentiable
from torch.distributions.distribution import Distribution
from torch.distributions.utils import expand_n, broadcast_all
class _StandardGamma(Function):
    """Autograd wrapper around the C standard-gamma sampler.

    Forward draws a standard Gamma(alpha) sample; backward uses the
    implicit reparameterization gradient computed in C.
    """

    @staticmethod
    def forward(ctx, alpha):
        sample = torch._C._standard_gamma(alpha)
        # Both the sample and alpha are needed to evaluate the gradient.
        ctx.save_for_backward(sample, alpha)
        return sample

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        sample, alpha = ctx.saved_tensors
        return grad_output * torch._C._standard_gamma_grad(sample, alpha)
def _standard_gamma(alpha):
    # Variables go through the autograd Function so gradients flow;
    # plain tensors call the C backend directly.
    if isinstance(alpha, Variable):
        return _StandardGamma.apply(alpha)
    return torch._C._standard_gamma(alpha)
class Gamma(Distribution):
    r"""
    Creates a Gamma distribution parameterized by shape `alpha` and rate `beta`.

    Example::

        >>> m = Gamma(torch.Tensor([1.0]), torch.Tensor([1.0]))
        >>> m.sample()  # Gamma distributed with shape alpha=1 and rate beta=1
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        alpha (float or Tensor or Variable): shape parameter of the distribution
        beta (float or Tensor or Variable): rate = 1 / scale of the distribution
    """
    has_rsample = True

    def __init__(self, alpha, beta):
        # Broadcast alpha and beta against each other so all methods can
        # assume matching shapes.
        self.alpha, self.beta = broadcast_all(alpha, beta)

    def sample(self):
        # Reparameterized: a standard Gamma(alpha) sample scaled by 1/beta.
        return _standard_gamma(self.alpha) / self.beta

    def sample_n(self, n):
        # Draw n samples stacked along a new leading dimension.
        return _standard_gamma(expand_n(self.alpha, n)) / self.beta

    def log_prob(self, value):
        # Gamma log-density:
        #   alpha*log(beta) + (alpha-1)*log(value) - beta*value - lgamma(alpha)
        return (self.alpha * torch.log(self.beta) +
                (self.alpha - 1) * torch.log(value) -
                self.beta * value - torch.lgamma(self.alpha))
| [
"adam.paszke@gmail.com"
] | adam.paszke@gmail.com |
dfd950972cd9f6e06897317ef128b10a9abaf3ea | 0f9f8e8478017da7c8d408058f78853d69ac0171 | /python3/l0114_flatten_binary_tree_to_linked_list.py | bb97c41a16aab1a7c00df25c40d8b7ddb637cd41 | [] | no_license | sprax/1337 | dc38f1776959ec7965c33f060f4d43d939f19302 | 33b6b68a8136109d2aaa26bb8bf9e873f995d5ab | refs/heads/master | 2022-09-06T18:43:54.850467 | 2020-06-04T17:19:51 | 2020-06-04T17:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | from common import TreeNode
import common
class Solution:
    def flatten(self, root: TreeNode) -> None:
        """
        Do not return anything, modify root in-place instead.
        """
        def collapse(node: TreeNode) -> TreeNode:
            # Flatten the subtree rooted at `node` into a right-only chain,
            # returning the last node of that chain (None for an empty tree).
            if not node:
                return None
            if not node.left and not node.right:
                return node
            if not node.left:
                return collapse(node.right)
            # Flatten the left subtree, splice the (flattened) right subtree
            # onto its tail, then move the whole chain to node.right.
            tail = collapse(node.left)
            if node.right:
                tail.right = node.right
                tail = collapse(node.right)
            node.right = node.left
            node.left = None
            return tail

        collapse(root)
"zhoulv82@gmail.com"
] | zhoulv82@gmail.com |
ce9c98fb960f27a183c05a296f22d83dfe5b8df4 | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/caffe2/python/ideep/conv_transpose_test.py | be35dbd8a38205287c81fd5bdb172505e25bed67 | [
"Apache-2.0"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 2,701 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvTransposeTest(hu.HypothesisTestCase):
    """Property-based checks that the ideep (MKL-DNN) ConvTranspose operator
    matches the reference device and has correct gradients."""
    @given(stride=st.integers(1, 2),
           pad=st.integers(0, 3),
           kernel=st.integers(1, 5),
           adj=st.integers(0, 2),
           size=st.integers(7, 10),
           input_channels=st.integers(1, 8),
           output_channels=st.integers(1, 8),
           batch_size=st.integers(1, 3),
           use_bias=st.booleans(),
           training_mode=st.booleans(),
           compute_dX=st.booleans(),
           **mu.gcs)
    @settings(max_examples=2, timeout=100)
    def test_convolution_transpose_gradients(self, stride, pad, kernel, adj,
                                             size, input_channels,
                                             output_channels, batch_size,
                                             use_bias, training_mode,
                                             compute_dX, gc, dc):
        training = 1 if training_mode else 0
        # Output adjustment must be smaller than the stride for ConvTranspose.
        assume(adj < stride)
        # Random inputs centered around zero.
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5
        w = np.random.rand(
            input_channels, output_channels, kernel, kernel)\
            .astype(np.float32) - 0.5
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        op = core.CreateOperator(
            "ConvTranspose",
            ["X", "w", "b"] if use_bias else ["X", "w"],
            ["Y"],
            stride=stride,
            kernel=kernel,
            pad=pad,
            adj=adj,
            training_mode=training,
            no_gradient_to_input=not compute_dX,
        )
        inputs = [X, w, b] if use_bias else [X, w]
        # Forward outputs must agree across devices within the threshold.
        self.assertDeviceChecks(dc, op, inputs, [0], threshold=0.001)

        if training_mode:
            # Indices refer to `inputs`: 0 = X, 1 = w, 2 = b. Only check
            # gradients that the operator is configured to produce.
            if use_bias and compute_dX:
                # w, b, X
                outputs_to_check = [1, 2, 0]
            elif use_bias:
                # w, b
                outputs_to_check = [1, 2]
            elif compute_dX:
                # w, X
                outputs_to_check = [1, 0]
            else:
                # w
                outputs_to_check = [1]
            for i in outputs_to_check:
                self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
22f210847b373c906655532a6d7e88e2ba23996e | e25dca25850ee8ee4ff929cba26ad66bfc6f15bb | /slowedml/phylip.py | 9fd44efaa6e268a049b531a3634bd36fb64ad3cf | [] | no_license | argriffing/slowedml | 704db5a4d15b8a5b5eb464e480bc7dd45ad56dc5 | 02907cd08210e4cf550885eb42ec5372b0f45c72 | refs/heads/master | 2021-01-01T17:52:02.647617 | 2013-02-19T16:34:12 | 2013-02-19T16:34:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | """
Read a tiny subset of phylip interleaved alignment files.
"""
def read_interleaved_codon_alignment(fin):
    """
    Yield columns of the alignment.

    The first yielded column is the tuple of taxon names; each subsequent
    column is a tuple of aligned codons, one entry per taxon.

    This function is not as stream-oriented as its interface may suggest:
    it reads the entire input into memory before yielding anything.

    @param fin: a stream open for reading
    @raise ValueError: if the row count is inconsistent with the taxon count
    """
    # read rows of whitespace-separated tokens, skipping blank lines
    rows = []
    for line in fin:
        row = line.split()
        if row:
            rows.append(row)
    # get the number of taxa and the total number of nucleotides
    s_ntaxa, s_nnucs = rows[0][:2]
    ntaxa = int(s_ntaxa)
    nnucs = int(s_nnucs)  # parsed to validate the header; not otherwise used
    # read the taxon names from the first paragraph
    taxon_names = [row[0] for row in rows[1:1 + ntaxa]]
    # check that the data is in the expected format: a header row plus a
    # whole number of ntaxa-row paragraphs
    if len(rows) % ntaxa == 1:
        # floor division keeps this an int under Python 3 as well
        nparagraphs = (len(rows) - 1) // ntaxa
    else:
        raise ValueError(
            'row count %d is not a header plus whole paragraphs of %d taxa'
            % (len(rows), ntaxa))
    # yield a column consisting of the taxon names
    yield tuple(taxon_names)
    # go through the input rows, paragraph by paragraph
    for i in range(nparagraphs):
        paragraph = rows[i * ntaxa + 1 : (i + 1) * ntaxa + 1]
        # the first paragraph has taxon names prefixed to its rows
        if i == 0:
            paragraph = [row[1:] for row in paragraph]
        # convert the paragraph into codon columns
        for column in zip(*paragraph):
            yield column
| [
"argriffi@ncsu.edu"
] | argriffi@ncsu.edu |
0e34cbf3b1e05ed06cc5806383597034a2a8b89e | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5662291475300352_0/Python/Astrae/1C.py | 6537ddd1a9860397cfa68b4bd67388263edcca3f | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,074 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 21:34:08 2015
@author: Fred
"""
import numpy as np
def main(ifn='C-small-1-attempt0.in',ofn='output.txt'):
with open(ifn) as inf: # ouvre l'input et le renomme en inf
with open(ofn,'w') as ouf: # crée l'output dans lequel on va écrire
noc = int(inf.readline().strip()) # permet de lire la 1ere ligne
# en général le nbr de cas
# le .strip() permet de virer les espace
for tnoc in range(noc): # boucle en fonction du nbr de cas
ouf.write("Case #%d: " %(tnoc+1)) # case dans laquelle on écrit en sortie
# on commence par lire le nombre de tribus
i=int(inf.readline().strip()) # convertit en liste une ligne en virant les espaces
L1=[0]*i
L2=[0]*i
L3=[0]*i
for k in range(i):
a=inf.readline().strip().split(' ')
L1[k]=int(a[0])
L2[k]=int(a[1])
L3[k]=int(a[2])
print L1
print L2
print L3
# nbr de marcheurs
N=sum(L2)
print N
goal=[] # on va mettre en combien de temps chaque marcheur termine 1 marche
for k in range(len(L1)): # pour chaque groupe de marcheur
pos=L1[k]
for j in range(L2[k]): # pour chaque marcheur
v=float(360)/(L3[k]+j) # vitesse du j-ieme marcheur du groupe
goal=goal+[(360.-L1[k])/v]
print goal
# ensuite pour chaque marcheur, il faudrait savoir combien de tour il fait en un temps t
nbrtour=[] # on va mettre en combien de temps chaque marcheur termine 1 marche
t=max(goal) # temps minimal
for k in range(len(L1)): # pour chaque groupe de marcheur
pos=L1[k]
for j in range(L2[k]): # pour chaque marcheur
v=float(360)/(L3[k]+j) # vitesse du j-ieme marcheur du groupe
nbrtour=nbrtour+[int(floor(((v*t+L1[k]))/360))]
print nbrtour
resultat=0
if N==1: # si un seul marcheur
resultat=0
elif N==2:
if max(nbrtour)>1:
resultat=1
ouf.write("%d\n" %resultat) # recopie le nombre puis saute une ligne
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
0d408e89af86671233cb41aa984c3d6dcb8a796c | e72ff96f633d065d23d16f550f0f7efc8901e10c | /blog/migrations/0001_initial.py | 7f6a73d3cac557af73584b5dc07e3005f01e3c4f | [
"MIT"
] | permissive | mhadiahmed/eCommerce | 5e9801978a62829e27566bfbc358cc3e2bb600ae | 68e6013d7d66b2a44e256b65956c507bdd7d1bd1 | refs/heads/master | 2021-01-19T11:08:51.437886 | 2018-08-28T15:08:34 | 2018-08-28T15:08:34 | 87,930,716 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-04 19:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the blog app: creates Post.

    initial = True

    dependencies = [
        # Post.auth references the project's (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('Type', models.CharField(choices=[('Null', 'Null'), ('Phone', 'Phone'), ('Car', 'Car'), ('Laptop', 'Laptop'), ('jops', 'Jops'), ('Electronic', 'Electronic'), ('Clothes', 'Clothes'), ('Makeup', 'Makeup'), ('Furnishings', 'Furnishings'), ('books', 'books'), ('sports', 'sports'), ('Property', 'Property'), ('Other', 'Other')], default='Null', max_length=120)),
                ('company', models.CharField(max_length=120)),
                ('dis', models.TextField(default='in here you w,ll write all the discribtion about your product')),
                ('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to='', width_field='width_field')),
                ('width_field', models.IntegerField(default=0)),
                ('height_field', models.IntegerField(default=0)),
                ('case', models.CharField(choices=[('Null', 'Null'), ('New', 'New'), ('Old', 'Old'), ('Second Hand', 'Second Hand'), ('Other', 'Other')], default=99, max_length=120)),
                ('price', models.BigIntegerField(default=0)),
                ('address', models.CharField(max_length=120)),
                ('draft', models.BooleanField(default=False)),
                ('date', models.DateTimeField(auto_now=True)),
                ('puplis', models.DateTimeField(auto_now_add=True)),
                ('auth', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-date', '-puplis'],
            },
        ),
    ]
| [
"mhadiahmed63@gmail.com"
] | mhadiahmed63@gmail.com |
5bc79658513a89728f29d61dcd7edb8969d4b19e | bcf74743a974159566d2d6a1a4158088a64df760 | /wcivf/apps/people/migrations/0010_auto_20170306_1206.py | 11dff493427a935a0ac0823cf8d0b7bb3d7f99b0 | [] | no_license | madcsaba/WhoCanIVoteFor | 16d8b946f236358285de34c9248cde81be15433c | bb658e3f7f705fe81265149d1a50e33ba04c3fec | refs/heads/master | 2021-04-06T20:19:20.254734 | 2018-03-12T10:30:08 | 2018-03-12T10:30:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-06 12:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: enforce one PersonPost per (person, post, election).

    dependencies = [
        ('elections', '0015_auto_20170304_1354'),
        ('people', '0009_auto_20170303_1823'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='personpost',
            unique_together=set([('person', 'post', 'election')]),
        ),
    ]
| [
"sym.roe@talusdesign.co.uk"
] | sym.roe@talusdesign.co.uk |
5dfa55cdd8c387c3fe142aa742ed535aea613cf9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/299/100765/submittedfiles/minha_bib.py | a1cc038e961e8aaf3777812018c47bdcb9fef29c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,608 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def sorteio(x, y):
    """Randomly return 0 or 2; the two parameters are unused.

    NOTE(review): an internal draw of 1 falls through and returns None —
    possibly unintended; compare sorteio2, which returns 1 in that case.
    """
    import random
    draw = random.randint(0, 2)
    if draw == 0:
        return 0
    if draw == 2:
        return 2
def sorteio2(x, y):
    """Randomly return 0, 1 or 2; the two parameters are unused."""
    import random
    draw = random.randint(0, 2)
    if draw == 0:
        return 0
    if draw == 2:
        return 2
    return 1
def verificar_vitoria(x):
    """Scan a 3x3 board and return True only when every comparison below
    is an equality (the counter stays at zero), otherwise False.

    Fixes over the original draft:
    - `cont` is now initialised (the original raised NameError on first use)
    - the `else;` typo (a SyntaxError) is corrected to `else:`

    NOTE(review): the row/column loops start at index 1, so several cell
    pairs are never compared — the victory logic looks incomplete; confirm
    the intended semantics with the author.
    """
    cont = 0
    # count empty-cell pair matches (kept from the original draft)
    for i in range(1, 3, 1):
        for j in range(1, 3, 1):
            if x[i-1][j-1] == x[i][j] == " ":
                cont += 1
    # row analysis
    for i in range(1, 3, 1):
        for j in range(1, 3, 1):
            if x[i][j-1] == x[i][j]:
                cont += 0
            else:
                cont += 1
    # column analysis
    for j in range(1, 3, 1):
        for i in range(1, 3, 1):
            if x[i-1][j] == x[i][j]:
                cont += 0
            else:
                cont += 1
    # diagonal analysis
    if x[0][0] == x[1][1] and x[1][1] == x[2][2]:
        cont += 0
    elif x[2][0] == x[1][1] and x[1][1] == x[0][2]:
        cont += 0
    else:
        cont += 1
    if cont == 0:
        return True
    else:
        return False
def maquinainteligente(x):
    """Pick the machine's move on a 3x3 board `x`.

    Scans rows, then columns, then diagonals for the first pair of equal
    cells and returns the coordinates of the third cell of that line as a
    two-character string 'rc' (row then column). Falls back to the centre,
    '11', when no pair matches.

    Fix over the original draft: the unused `from minha_bib import sorteio2`
    self-import was removed (it was never referenced in this function).
    """
    # rows
    # first
    if x[0][0] == x[0][2]:
        return '01'
    elif x[0][1] == x[0][2]:
        return '00'
    elif x[0][0] == x[0][1]:
        return '02'
    # second
    elif x[1][0] == x[1][1]:
        return '12'
    elif x[1][1] == x[1][2]:
        return '10'
    elif x[1][0] == x[1][2]:
        return '11'
    # third
    elif x[2][0] == x[2][1]:
        return '22'
    elif x[2][1] == x[2][2]:
        return '20'
    elif x[2][0] == x[2][2]:
        return '21'
    # columns
    # first
    elif x[0][0] == x[1][0]:
        return '20'
    elif x[1][0] == x[2][0]:
        return '00'
    elif x[0][0] == x[2][0]:
        return '10'
    # second
    elif x[0][1] == x[1][1]:
        return '21'
    elif x[1][1] == x[2][1]:
        return '01'
    elif x[0][1] == x[2][1]:
        return '11'
    # third
    elif x[0][2] == x[1][2]:
        return '22'
    elif x[1][2] == x[2][2]:
        return '02'
    elif x[0][2] == x[2][2]:
        return '12'
    # main diagonal
    elif x[0][0] == x[1][1]:
        return '22'
    elif x[1][1] == x[2][2]:
        return '00'
    elif x[0][0] == x[2][2]:
        return '11'
    # anti-diagonal
    elif x[2][0] == x[1][1]:
        return '02'
    elif x[1][1] == x[0][2]:
        return '20'
    elif x[2][0] == x[0][2]:
        return '11'
    else:
        return '11'
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
68ce3ac1f75f709080e7a2848a47b98cf44a14ce | c426f4d6f7e5b9f9a6527ae48b2a8932f6e9a000 | /mayan/apps/storage/admin.py | 072790cdd14a67e38c195d64ac8f06a24de0519f | [
"Apache-2.0"
] | permissive | telsch/Mayan-EDMS | 208d6dbc08e2fa76f3132c8761f247e02768333e | 5f2bdfbf1f3d109e403b2d1f3f81b79bdb9cc35f | refs/heads/master | 2023-03-21T13:02:40.721699 | 2021-03-14T05:02:54 | 2021-03-14T05:02:54 | 254,727,379 | 0 | 1 | null | 2020-04-10T20:15:32 | 2020-04-10T20:15:31 | null | UTF-8 | Python | false | false | 280 | py | from django.contrib import admin
from .models import SharedUploadedFile
@admin.register(SharedUploadedFile)
class SharedUploadedFileAdmin(admin.ModelAdmin):
    # Browse shared uploaded files by upload time; all listed fields are
    # read-only because these rows are created by the upload machinery,
    # not by admin users.
    date_hierarchy = 'datetime'
    list_display = ('file', 'filename', 'datetime',)
    readonly_fields = list_display
| [
"roberto.rosario@mayan-edms.com"
] | roberto.rosario@mayan-edms.com |
ecd4f5beea18b9c1d6195018fcc353f0c32f5dbf | c7b669b7352c9eee3d17f9e57b2fc0df0907f3eb | /Day01/ex06/my_sort.py | fba457e1793d06ff3df89dfcb97e89091546e0d6 | [] | no_license | m1n-q/django-inside | 89da9514c7b4d6d2d883df720b317ce4ea536590 | 9b70915cd798285de096974f9eb271f338756250 | refs/heads/main | 2023-07-18T00:18:15.327135 | 2021-09-02T09:22:31 | 2021-09-02T09:22:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | def sort_name():
    """Print guitarist surnames ordered by birth year (ties broken by name)."""
    # Surname -> birth year; years stay strings, which is safe here because
    # every value has four digits, so lexicographic order equals numeric order.
    d = {
        'Hendrix': '1942',
        'Allman': '1946',
        'King': '1925',
        'Clapton': '1945',
        'Johnson': '1911',
        'Berry': '1926',
        'Vaughan': '1954',
        'Cooder': '1947',
        'Page': '1944',
        'Richards': '1943',
        'Hammett': '1962',
        'Cobain': '1967',
        'Garcia': '1942',
        'Beck': '1944',
        'Santana': '1947',
        'Ramone': '1948',
        'White': '1975',
        'Frusciante': '1970',
        'Thompson': '1949',
        'Burton': '1939',
        }
    # Invert to (year, name) pairs so the default tuple sort orders by year
    # first, then by surname.
    l = []
    for k, v in d.items():
        l.append((v, k))
    l.sort()
    for y, n in l:
        print(n)
if __name__ == '__main__':
    sort_name()
| [
"m1n_q@naver.com"
] | m1n_q@naver.com |
934096cedb5dd7e57b0fe700e2d0d53edde2c14a | da85d4caf3e5e1c9df8839fafd51f960f02daadd | /FabricUI/_version.py | df068b6b98498c361cd2a5f5a6b5c6ac6cdc9472 | [
"Apache-2.0"
] | permissive | shuaih7/FabricUI | 6efe58f3dbefebbd49607094a28bf2d7bc9314ca | 6501e8e6370d1f90174002f5768b5ef63e8412bc | refs/heads/main | 2023-04-13T10:07:42.090043 | 2021-04-13T02:55:12 | 2021-04-13T02:55:12 | 314,152,777 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | """ package version """
VERSION_TAG = "1.0.0"
__version__ = VERSION_TAG
def print_version():
    """Write the full FabricUI version banner to stdout."""
    print("FabricUI version: " + VERSION_TAG)
"shuaih7@gmail.com"
] | shuaih7@gmail.com |
c896e55601b73e9c564bb1308954f7f623f82312 | e44169033ae1dd01397b5ceeccb84ee6fc7a0009 | /challenge5/shiyanlou/shiyanlou/spiders/github.py | 1d4243d0864ba0a909fa0ffca404952c01495a62 | [] | no_license | Yao-Phoenix/data_challenge | 6d4f7bf666c92bdc19c04cdfd4d17ba8767a6056 | 287ae2c1b786596b0c9b5a36d54426e38970ba76 | refs/heads/master | 2021-02-20T23:55:41.852331 | 2020-03-31T02:19:05 | 2020-03-31T02:19:05 | 245,347,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | # -*- coding: utf-8 -*-
import scrapy
from shiyanlou.items import ShiyanlouItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spider import Rule
class GithubSpider(scrapy.spiders.CrawlSpider):
    """Crawl shiyanlou's GitHub repository listing and yield repo items."""
    name = 'github'
    allowed_domains = ['github.com']
    start_urls = ['https://github.com/shiyanlou?tab=repositories']
    # Follow both the paginated '?after=...' listing pages and the plain
    # '?tab=repositories' page, parsing each with parse_item.
    rules = (Rule(
        LinkExtractor(
            allow='https://github.com/shiyanlou\?after=*'),
        callback='parse_item',
        follow=True),
        Rule(
        LinkExtractor(
            allow='https://github.com/shiyanlou\?tab=repositories$'),
        callback='parse_item',
        follow=True),)
    def parse_item(self, response):
        """Yield one ShiyanlouItem per repository row on the listing page."""
        item = ShiyanlouItem()
        # NOTE(review): a single item instance is reused across iterations —
        # confirm downstream pipelines copy it, or later rows may clobber
        # earlier ones.
        for data in response.css('li.col-12'):
            item['repo_name'] = data.xpath('.//h3/a/text()').extract_first().strip()
            item['update_time'] = data.xpath('.//relative-time/@datetime'
                                             ).extract_first()
            yield item
| [
"493867456@qq.com"
] | 493867456@qq.com |
449bf91a1aa96f17143c3ca9ec1f7cc62665bbc9 | 3562aee827438794ec17250f033818fa699d9c56 | /config/settings/local.py | 166824d1e6eeb7bd8df3b95e2a43af9cd26e27af | [
"MIT"
] | permissive | feastkl-app/attendance-tracker-v2 | 509d51142a9882e7b591b1349bce944bf2274f55 | 38670cbf6e1f1a44becc94fb3c90ec33fda68b9b | refs/heads/master | 2021-05-02T03:47:35.547933 | 2018-02-10T04:49:21 | 2018-02-10T04:49:21 | 120,903,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | """
Local settings for Attendance Tracker v2 project.
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='ogk7kTeUPzZhqOIgpsLT2c3KMUknJFkge60M0cqz6cM6zSih5s')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| [
"rpebenito@xyber.ph"
] | rpebenito@xyber.ph |
7521983485a9ed8ab0dbbdc43354ae8de7901ef3 | a2eaa3decc385dea227da8a99203f767f32cf941 | /scientific_expedition/call_to_home.py | 514d1f866aac2f91ffa05e36acff4c7404569275 | [] | no_license | vlad-bezden/py.checkio | 94db32111eeeb2cd90c7b3c4606ea72cf2bb6678 | 6cd870ca3056cc9dcdce0ad520c27e92311719b3 | refs/heads/master | 2021-07-01T18:39:35.955671 | 2020-10-05T00:56:38 | 2020-10-05T00:56:38 | 93,111,389 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | """Call to Home
https://py.checkio.org/en/mission/calls-home/
Nicola believes that Sophia calls to Home too much and her
phone bill is much too expensive. He took the bills for Sophia's
calls from the last few days and wants to calculate how much it costs.
The bill is represented as an array with information about the calls.
Help Nicola to calculate the cost for each of Sophia calls.
Each call is represented as a string with date, time and duration
of the call in seconds in the follow format:
"YYYY-MM-DD hh:mm:ss duration"
The date and time in this information are the start of the call.
Space-Time Communications Co. has several rules on how
to calculate the cost of calls:
First 100 (one hundred) minutes in one day are priced at 1 coin per minute;
After 100 minutes in one day, each minute costs 2 coins per minute;
All calls are rounded up to the nearest minute.
For example 59 sec ≈ 1 min, 61 sec ≈ 2 min;
Calls count on the day when they began.
For example if a call was started 2014-01-01 23:59:59,
then it counted to 2014-01-01;
For example:
2014-01-01 01:12:13 181
2014-01-02 20:11:10 600
2014-01-03 01:12:13 6009
2014-01-03 12:13:55 200
First day -- 181s≈4m -- 4 coins;
Second day -- 600s=10m -- 10 coins;
Third day -- 6009s≈101m + 200s≈4m -- 100 + 5 * 2 = 110 coins;
Total -- 124 coins.
Input: Information about calls as a tuple of strings.
Output: The total cost as an integer.
Precondition: 0 < len(calls) ≤ 30
0 < call_duration ≤ 7200
The bill is sorted by datetime.
"""
from itertools import groupby
from math import ceil
from typing import Tuple
def total_cost(calls: Tuple[str, ...]) -> int:
    """Compute the total price (in coins) of Sophia's calls.

    Each entry looks like "YYYY-MM-DD hh:mm:ss duration" and the bill is
    pre-sorted by datetime, so grouping consecutive entries by the
    10-character date prefix yields one group per day.  Durations are
    rounded up to whole minutes; a day's first 100 minutes cost 1
    coin/minute and every minute beyond that costs 2 coins/minute.
    """
    total = 0
    for _, day_records in groupby(calls, key=lambda rec: rec[:10]):
        # rec[20:] is the duration field in seconds; round up to minutes.
        minutes = sum(ceil(int(rec[20:]) / 60) for rec in day_records)
        # minutes * 2 - 100 only exceeds `minutes` once minutes > 100,
        # which is exactly when the double rate kicks in.
        total += max(minutes, minutes * 2 - 100)
    return total
# Self-tests mirroring the mission's three worked examples; run only when
# the module is executed directly.
if __name__ == "__main__":
    result = total_cost(
        (
            "2014-01-01 01:12:13 181",
            "2014-01-02 20:11:10 600",
            "2014-01-03 01:12:13 6009",
            "2014-01-03 12:13:55 200",
        )
    )
    assert result == 124, "Base example"
    result = total_cost(
        (
            "2014-02-05 01:00:00 1",
            "2014-02-05 02:00:00 1",
            "2014-02-05 03:00:00 1",
            "2014-02-05 04:00:00 1",
        )
    )
    assert result == 4, "Short calls but money..."
    result = total_cost(
        (
            "2014-02-05 01:00:00 60",
            "2014-02-05 02:00:00 60",
            "2014-02-05 03:00:00 60",
            "2014-02-05 04:00:00 6000",
        )
    )
    assert result == 106, "Precise calls"
    print("PASSED!!!")
"vlad.bezden@gmail.com"
] | vlad.bezden@gmail.com |
c8a369c28a65789362fbf679e7d7872192bb9b8e | e21837ee462fb31a088bd903ecdbb96631020d0a | /Arcade/The core/Forest Edge/concatenateArray.py | dcf7dcfd033db1a5cb4ec723c39db8792f792b53 | [] | no_license | delta94/Code_signal- | 7965ee96a858425c65c7a51a47833d80c8e6d8d3 | 1383a528b4353b7b8db4a6634ea0caa2b5895f9d | refs/heads/master | 2020-07-31T05:38:22.158742 | 2019-04-12T18:46:16 | 2019-04-12T18:46:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | """
Given two arrays of integers a and b, obtain the array formed by the elements of a followed by the elements of b.
Example
For a = [2, 2, 1] and b = [10, 11], the output should be
concatenateArrays(a, b) = [2, 2, 1, 10, 11].
"""
def concatenateArrays(a, b):
    """Return a new array made of the elements of `a` followed by those of `b`."""
    combined = a + b
    return combined
| [
"noreply@github.com"
] | delta94.noreply@github.com |
44eb9d21029ed8f03f9e1da60e1eca309c0cd116 | a70da47d4a09865bf6c44b8d61069c2724cdc9a4 | /friendface/web/external.py | 29f7f98911492854bd69af5dd5c3b88bfab24078 | [
"BSD-2-Clause"
] | permissive | eblade/friendface | f1251fd59894c2330567e3872c565049c2c13471 | 152e7d743cf7a4f75c95a24176127e901cfcb652 | refs/heads/master | 2021-01-15T15:30:58.870507 | 2016-01-19T07:55:51 | 2016-01-19T07:55:51 | 48,060,039 | 0 | 0 | null | 2016-01-19T07:33:43 | 2015-12-15T18:00:41 | JavaScript | UTF-8 | Python | false | false | 1,122 | py | # -*- coding: utf-8 -*-
from bottle import HTTPResponse
from .api import Api
class ExternalApi(Api):
    """Bottle endpoints exposing messages and branches to external peers."""
    def __init__(self, session, app):
        self.session = session
        self.app = app

        # Set Up External Endpoints
        app.route('/m/<message_id>', 'GET', self.get_message_by_id)
        app.route('/b/<message_id>', 'GET', self.get_branch_by_id)

    # override
    def get_thread_name(self):
        return 'external_api'

    def get_message_by_id(self, message_id):
        """Serve a single message as an HTTP response, 404 if unknown."""
        message = self.session.get_message(message_id)
        if message is None:
            # NOTE(review): this handler *returns* the 404 but *raises* the
            # 200 below; both work in bottle, but the mix looks accidental —
            # confirm and unify.
            return HTTPResponse('Unknown message', 404)
        body, headers = message.to_http()
        raise HTTPResponse(
            body=body,
            status=200,
            headers=headers,
        )

    def get_branch_by_id(self, message_id):
        """Serve a branch as a text/uri-list response, 404 if unknown."""
        branch = self.session.get_branch(message_id)
        if branch is None:
            return HTTPResponse('Unknown branch', 404)
        raise HTTPResponse(
            branch.to_uri_list(),
            status=200,
            headers={'Content-Type': 'text/uri-list'},
        )
"johan@egneblad.se"
] | johan@egneblad.se |
3c6719eb8f4251d97d68d4d931cb3d06c0651282 | 3de87aebbd1db2b1a241415d169e4d338216975d | /tests/filter_integration_tests/test_filters_with_mongo_storage.py | ada988c2f1602bb7d1285a65a7b018f3fe8c2f74 | [
"BSD-3-Clause"
] | permissive | SeppPenner/ChatterBot | 42543b49343ac7336371ddbf5dbd70177b2725c1 | 4a1201c0697ebc787cbf348e372ced1fdceb4efb | refs/heads/master | 2021-05-11T07:37:56.466278 | 2020-02-20T08:58:38 | 2020-02-20T08:58:38 | 118,025,056 | 1 | 0 | BSD-3-Clause | 2020-02-20T08:58:39 | 2018-01-18T19:00:50 | Python | UTF-8 | Python | false | false | 981 | py | from tests.base_case import ChatBotMongoTestCase
class RepetitiveResponseFilterTestCase(ChatBotMongoTestCase):
    """
    Test case for the RepetitiveResponseFilter class.
    """

    def test_filter_selection(self):
        """
        Test that repetitive responses are filtered out of the results.
        """
        from chatterbot.filters import RepetitiveResponseFilter
        from chatterbot.trainers import ListTrainer

        self.chatbot.filters = (RepetitiveResponseFilter(), )
        self.chatbot.set_trainer(ListTrainer)

        # 'Hello' -> 'Hi' appears three times so it is the dominant answer.
        self.chatbot.train([
            'Hello',
            'Hi',
            'Hello',
            'Hi',
            'Hello',
            'Hi, how are you?',
            'I am good.'
        ])

        # The filter should stop 'Hi' being chosen twice in a row; the second
        # request must fall through to the alternative trained response.
        first_response = self.chatbot.get_response('Hello')
        second_response = self.chatbot.get_response('Hello')

        self.assertEqual(first_response.text, 'Hi')
        self.assertEqual(second_response.text, 'Hi, how are you?')
| [
"gunthercx@gmail.com"
] | gunthercx@gmail.com |
50af79ea848568965634294186fcface1d2ec00d | 5a1f77b71892745656ec9a47e58a078a49eb787f | /8_Cloudrip_Mountain/413-Medic_School/medical_school.py | 89f7c27ff1d706f7e9c4bb89b27fe72157f9d8c4 | [
"MIT"
] | permissive | ripssr/Code-Combat | 78776e7e67c033d131e699dfeffb72ca09fd798e | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | refs/heads/master | 2020-06-11T20:17:59.817187 | 2019-07-21T09:46:04 | 2019-07-21T09:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | def startsWith(phrase, word):
    """Return True if `phrase` begins with `word` (manual prefix compare)."""
    if len(word) > len(phrase):
        # A string longer than `phrase` can never be its prefix.
        return False
    for i in range(len(word)):
        if phrase[i] != word[i]:
            return False
    return True
def onHear(event):
    """Pet 'hear' handler: when a speaker whose id starts with "Exp" talks,
    fetch the nearest potion (if any) and return to the post at (28, 34)."""
    if startsWith(event.speaker.id, "Exp"):
        potion = pet.findNearestByType("potion")
        if potion:
            pet.fetch(potion)
        pet.moveXY(28, 34)
# Register the hear-handler on the pet, then patrol forever: walk to the
# nearest mushroom.  `hero` and `pet` are globals provided by the game engine.
pet.on("hear", onHear)
while True:
    nearest = hero.findNearest(hero.findByType("mushroom"))
    if nearest:
        hero.moveXY(nearest.pos.x, nearest.pos.y)
| [
"katik.hello@gmail.com"
] | katik.hello@gmail.com |
181382f3f67c9810cdef2e21360bfe18a6ffac71 | 8df87b22b2689b93bbe418f2a7c7a69a2bebbd90 | /other_solutions/exception2.py | c0caeb7214d4bb038af405a05f0b4ef331c03595 | [] | no_license | SHUHAIB-AREEKKAN/automate_boring_stuff-Solutions | e3f25ec85e5a2b99f431430081c55c12c09c007b | 5df77b8b41d43f442671dd9595313f80c88be1d0 | refs/heads/master | 2021-01-25T08:27:42.810089 | 2017-06-08T15:43:28 | 2017-06-08T15:43:28 | 93,765,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | def spam(inputs):
return 42 / inputs
try:
print(spam(2))
print(spam(3))
print(spam(0))
print(spam(4))
except ZeroDivisionError:
print("occured zero division error")
| [
"homeshuhaib@gmail.com"
] | homeshuhaib@gmail.com |
69cbebcc23d85d8a69d474bc7321a1c3cd7515e4 | bc28f8fe941caf281261afa1641868e743ecb5ab | /Commonlounge/ZCO12001.py | f10af53a2fed4813735914c2dbd168b8f2dafbd9 | [] | no_license | anubhavshrimal/CompetitiveProgrammingInPython | 9fc6949fb3cd715cfa8544c17a63ffbe52677b37 | 2692c446d49ec62d4967ed78a7973400db7ce981 | refs/heads/master | 2021-07-05T08:17:15.182154 | 2018-05-29T02:26:25 | 2018-05-29T02:26:25 | 60,554,340 | 7 | 6 | null | 2021-05-24T17:46:16 | 2016-06-06T19:18:27 | Python | UTF-8 | Python | false | false | 721 | py |
# Bracket-sequence analysis: '1' is an opening bracket, '2' a closing one.
n = int(input())
seq = input().split()
depth = 0        # current nesting depth
start_index = 0  # 1-based position where the current top-level run began
curr_seq = 0     # closing brackets seen in the current top-level run
# maximum depth value
max_depth = 0
# start index of max depth
depth_start = 0
# maximum sequence length
max_seq_len = 0
# start index of max sequence
max_seq_start = 0
for i, b in enumerate(seq, start=1):
    if b == '1':
        depth += 1
        if depth == 1:
            start_index = i
    elif b == '2':
        depth -= 1
        curr_seq += 1
    # New deepest nesting: record the position where it was reached.
    if max_depth < depth:
        max_depth = depth
        depth_start = i
    # Back at top level: the run just closed; keep it if it is the longest.
    # NOTE(review): curr_seq counts '2' symbols, i.e. matched pairs, not
    # total symbols — confirm against the expected output format.
    if depth == 0:
        if curr_seq > max_seq_len:
            max_seq_len = curr_seq
            max_seq_start = start_index
        curr_seq = 0
print(max_depth, depth_start, max_seq_len, max_seq_start)
| [
"anubhavshrimal@gmail.com"
] | anubhavshrimal@gmail.com |
a1a63d340e315a9235d8ab66fb99ae5cbec6881f | f6c40808efe1cf9e5f2c76a29a1dd1bf86df8d4d | /prism/code_gen.py | 77e98eddfaae72979089800d4f65eb489f600553 | [] | no_license | sahabi/versynth | 55709f3a19ab229f9a995830a47add83f9cc6fc4 | 9983344c03a1ee887d9b145655497b9862663212 | refs/heads/master | 2021-07-21T00:03:52.295043 | 2017-10-31T20:01:56 | 2017-10-31T20:01:56 | 109,047,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | x_size = 4
y_size = 4
prism_code = "mdp\n"
prism_code += "module grid\n"
prism_code += " o_state : [0..{}] init 0;\n".format((x_size-1)*(y_size-1))
#prism_code += " a_x : [0..{}];\n".format(x_size-1)
#prism_code += " a_y : [0..{}];\n".format(y_size-1)
def stay(i,j,role):
return "({0}_state' = {state})".format(role,state=i+(x_size*j))
def left(i,j,role):
return "({0}_state' = {state})".format(role,state=(i-1)+(j*x_size))
def right(i,j,role):
return "({0}_state' = {state})".format(role,state=(i+1)+(x_size*j))
def up(i,j,role):
return "({0}_state' = {state})".format(role,state=i+((j+1)*x_size))
def down(i,j,role):
return "({0}_state' = {state})".format(role,state=i+((j-1)*x_size))
def current(i,j,role="o"):
return " [] {0}_state = {state} -> ".format(role,state=i+(j*x_size))
def is_bot_left(x,y):
    """True iff (x, y) is the bottom-left corner (0, 0).

    Bug fix: the original returned `x == 0 == j`, comparing against the
    *global* loop variable `j` instead of the `y` parameter (it only
    worked because the call site happened to pass that same global).
    """
    return x == 0 and y == 0
# Grid-edge predicates; x is the column, y is the row, and the grid
# dimensions come from the module globals x_size / y_size.
def is_top_right(x,y):
    return x == x_size-1 and y == y_size-1
def is_top_left(x,y):
    return x == 0 and y == y_size-1
def is_bot_right(x,y):
    return x == x_size-1 and y == 0
def is_bot(y):
    return y == 0
def is_top(y):
    return y == y_size-1
def is_right(x):
    return x == x_size-1
def is_left(x):
    return x == 0
# Emit one PRISM command per grid cell for the "o" (opponent) module.
# Corners offer 3 equally likely moves (stay + 2 neighbours), edges 4,
# and interior cells 5.
for i in range(x_size):
    for j in range(y_size):
        if is_bot_left(i,j):
            prism_code += current(i,j,role="o")+\
                        "1/3 : "+right(i,j,role="o")+\
                        "+ 1/3 : "+up(i,j,role="o")+\
                        "+ 1/3 : "+stay(i,j,role="o")+\
                        ";\n"
        elif is_top_right(i,j):
            prism_code += current(i,j,role="o")+\
                        "1/3 : "+left(i,j,role="o")+\
                        "+ 1/3 : "+down(i,j,role="o")+\
                        "+ 1/3 : "+stay(i,j,role="o")+\
                        ";\n"
        elif is_top_left(i,j):
            prism_code += current(i,j,role="o")+\
                        "1/3 : "+right(i,j,role="o")+\
                        "+ 1/3 : "+down(i,j,role="o")+\
                        "+ 1/3 : "+stay(i,j,role="o")+\
                        ";\n"
        elif is_bot_right(i,j):
            prism_code += current(i,j,role="o")+\
                        "1/3 : "+left(i,j,role="o")+\
                        "+ 1/3 : "+up(i,j,role="o")+\
                        "+ 1/3 : "+stay(i,j,role="o")+\
                        ";\n"
        elif is_bot(j):
            prism_code += current(i,j,role="o")+\
                        "1/4 : "+left(i,j,role="o")+\
                        "+ 1/4 : "+up(i,j,role="o")+\
                        "+ 1/4 : "+stay(i,j,role="o")+\
                        "+ 1/4 : "+right(i,j,role="o")+\
                        ";\n"
        elif is_top(j):
            prism_code += current(i,j,role="o")+\
                        "1/4 : "+left(i,j,role="o")+\
                        "+ 1/4 : "+down(i,j,role="o")+\
                        "+ 1/4 : "+stay(i,j,role="o")+\
                        "+ 1/4 : "+right(i,j,role="o")+\
                        ";\n"
        elif is_left(i):
            # Bug fix: the original passed role="oo" to down() here, which
            # emitted an update on a non-existent "oo_state" variable.
            prism_code += current(i,j,role="o")+\
                        "1/4 : "+down(i,j,role="o")+\
                        "+ 1/4 : "+up(i,j,role="o")+\
                        "+ 1/4 : "+stay(i,j,role="o")+\
                        "+ 1/4 : "+right(i,j,role="o")+\
                        ";\n"
        elif is_right(i):
            prism_code += current(i,j,role="o")+\
                        "1/4 : "+left(i,j,role="o")+\
                        "+ 1/4 : "+down(i,j,role="o")+\
                        "+ 1/4 : "+stay(i,j,role="o")+\
                        "+ 1/4 : "+up(i,j,role="o")+\
                        ";\n"
        else:
            prism_code += current(i,j,role="o")+\
                        "1/5 : "+left(i,j,role="o")+\
                        "+ 1/5 : "+down(i,j,role="o")+\
                        "+ 1/5 : "+stay(i,j,role="o")+\
                        "+ 1/5 : "+up(i,j,role="o")+\
                        "+ 1/5 : "+right(i,j,role="o")+\
                        ";\n"
        # No-op today (the helper strings leave no unformatted
        # placeholders); retained from the original to avoid altering
        # behaviour.
        prism_code = prism_code.format(y=j,py=j+1,ny=j-1,nx=i-1,px=i+1,x=i)
prism_code += "endmodule"
# Python 2 print statement: emit the finished PRISM model to stdout.
print prism_code
| [
"sahabi@gmail.com"
] | sahabi@gmail.com |
8b7797c26c13e1431cec28e5848ed994de0e8abc | f7dd190a665a4966db33dcc1cc461dd060ca5946 | /venv/Lib/site-packages/django/core/management/commands/startproject.py | 28867788a63c8555e1302fde581cfed0a80073fb | [] | no_license | Darwin939/macmeharder_back | 2cc35e2e8b39a82c8ce201e63d9f6a9954a04463 | 8fc078333a746ac7f65497e155c58415252b2d33 | refs/heads/main | 2023-02-28T12:01:23.237320 | 2021-02-02T17:37:33 | 2021-02-02T17:37:33 | 328,173,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django.core.management.templates import TemplateCommand
from ..utils import get_random_secret_key
class Command(TemplateCommand):
help = (
"Creates a Django project directory structure for the given project "
"name in the current directory or optionally in the given directory."
)
missing_args_message = "You must provide a project name."
def handle(self, **options):
project_name = options.pop('name')
target = options.pop('directory')
# Create a random SECRET_KEY to put it in the apps settings.
options['secret_key'] = get_random_secret_key()
super().handle('project', project_name, target, **options)
| [
"51247000+Darwin939@users.noreply.github.com"
] | 51247000+Darwin939@users.noreply.github.com |
58a6d22d2cdddb49ec012d5c542d653ca9e1b958 | 2038ede147bf85734f64f7be110e64db0725c0d3 | /gerapy/server/core/config.py | 5d0205438643bf0d32473c81797ccc644e5cdeec | [
"MIT"
] | permissive | hantmac/Gerapy | 24d17388af57208113199657b7c9c30fe1513b0b | 41b1221dba2b89b89ca98f6bfbcc045cafdac469 | refs/heads/master | 2020-04-26T01:51:19.997122 | 2019-06-03T03:10:58 | 2019-06-03T03:10:58 | 173,216,808 | 0 | 0 | MIT | 2019-03-01T01:51:31 | 2019-03-01T01:51:30 | null | UTF-8 | Python | false | false | 327 | py | import configparser
from os.path import join
def config(path, section, option, name='scrapy.cfg', default=None):
    """Read `option` from `section` of the config file `path`/`name`.

    Returns `default` when the section or option is missing.  (The
    original caught only NoOptionError, so a missing *section* still
    raised despite the `default` parameter's contract.)
    """
    try:
        cf = configparser.ConfigParser()
        cfg_path = join(path, name)
        # ConfigParser.read silently ignores a missing file, leaving the
        # parser empty, which then surfaces as NoSectionError below.
        cf.read(cfg_path)
        return cf.get(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default
| [
"cqc@cuiqingcai.com"
] | cqc@cuiqingcai.com |
00bfd978db46af137fd5ad6cca417d3e8013d999 | c65319b258ce8c629bb11d31ed62f77a134c448a | /plugins/uvwgen/UVWGenExplicit.py | d9eb9a9bede36d3de02814a843d108946d5dfa13 | [] | no_license | Zaurio/vb30 | cacf3c254eaa7337e35e95572dfa2b83d4899ea9 | 3d75476c99a3dc91dec226bfb87f01aa598b0824 | refs/heads/master | 2020-04-05T23:31:26.220823 | 2015-06-22T12:03:09 | 2015-06-22T12:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | #
# V-Ray For Blender
#
# http://chaosgroup.com
#
# Author: Andrei Izrantcev
# E-Mail: andrei.izrantcev@chaosgroup.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
#
import bpy
# Plugin descriptor constants consumed by the V-Ray For Blender plugin loader.
TYPE = 'UVWGEN'
ID = 'UVWGenExplicit'
NAME = 'Explicit'
DESC = ""

# One dict per plugin attribute: name, description, value type, default.
PluginParams = (
    {
        'attr' : 'u',
        'desc' : "the U input",
        'type' : 'FLOAT_TEXTURE',
        'default' : 1.0,
    },
    {
        'attr' : 'v',
        'desc' : "the V input",
        'type' : 'FLOAT_TEXTURE',
        'default' : 1.0,
    },
    {
        'attr' : 'w',
        'desc' : "the W input",
        'type' : 'FLOAT_TEXTURE',
        'default' : 1.0,
    },
    {
        'attr' : 'uvw',
        'desc' : "",
        'type' : 'TEXTURE',
        'default' : (0.0, 0.0, 0.0),
    },
)
| [
"andrei.izrantcev@chaosgroup.com"
] | andrei.izrantcev@chaosgroup.com |
901e7e049f8ca3cfa56f942c4c0861432e4ad6ed | e63ab09f227459380c317aa1694cffd04255c807 | /cheshire3/web/oai_utils.py | 21df4073e90df00a0f54a32261ab276d67906c4c | [
"ICU",
"X11"
] | permissive | bitwhite/cheshire3 | 91a0d2f8d2e79ac277ac4f7a3bea9efa911ce3d6 | ca27bc2600d217e36a429ccfe064f11d9b200193 | refs/heads/master | 2021-05-27T03:50:09.456813 | 2013-10-10T13:47:16 | 2013-10-10T13:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,915 | py |
import sys
import urllib
import datetime
from lxml import etree
from cheshire3.record import LxmlRecord
# cheshire3.web package
from cheshire3.web.www_utils import cgi_encode
from cheshire3.web.sru_utils import fetch_data
# oaipmh package
from oaipmh.common import Header
# XML namespace URIs used by the XPath queries and serialisation below.
NS_OAIPMH = 'http://www.openarchives.org/OAI/2.0/'
NS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
NS_OAIDC = 'http://www.openarchives.org/OAI/2.0/oai_dc/'
NS_DC = "http://purl.org/dc/elements/1.1/"

# Prefix -> URI map (None is the default namespace).
nsmap = {
    None: NS_OAIPMH,
    'xsi': NS_XSI,
    'oai_dc': NS_OAIDC,
    'dc': NS_DC
}
def headerFromLxmlElement(et):
    """Build an oaipmh Header from an lxml <header> element.

    NOTE(review): the '//' XPaths search the whole document `et` belongs
    to rather than being relative to this element — confirm intended.
    """
    identifier = et.xpath('string(//oai:identifier)', namespaces={'oai': NS_OAIPMH})
    datestamp = et.xpath('string(//oai:datestamp)', namespaces={'oai': NS_OAIPMH})
    # Parses the 'YYYY-MM-DDTHH:MM:SSZ' form; the trailing 'Z' is matched
    # literally, yielding a naive datetime.
    datestamp = datetime.datetime.strptime(datestamp, '%Y-%m-%dT%H:%M:%SZ')
    return Header(identifier, datestamp, [], None)
def getRecord(baseUrl, metadataPrefix, identifier):
    """Return (Header, metadata, about) tuple of record with specified identifier from the specified OAI-PMH server."""
    # Python 2 urllib API: urlencode lives at the top level.
    args = {'verb': "GetRecord",
            'metadataPrefix': metadataPrefix,
            'identifier': identifier}
    params = urllib.urlencode(args)
    url = "{0}?{1}".format(baseUrl, params)
    data = fetch_data(url)
    try:
        tree = etree.fromstring(data)
    except:
        # Dump the URL and raw payload for debugging, then re-raise.
        sys.stderr.write(url + '\n')
        sys.stderr.write(data + '\n')
        sys.stderr.flush()
        raise
    hEl = tree.xpath('//oai:record[1]/oai:header', namespaces={'oai': NS_OAIPMH})[0]
    header = headerFromLxmlElement(hEl)
    recEl = tree.xpath('//oai:record[1]/oai:metadata/*', namespaces={'oai': NS_OAIPMH})[0]
    # Wrap the metadata payload in an LxmlRecord keyed by the OAI identifier.
    recString = etree.tostring(recEl)
    rec = LxmlRecord(recEl, xml=recString, docId=identifier, byteCount=len(recString))
    return (header, rec, None)
def listIdentifiers(baseUrl, metadataPrefix, set=None, from_=None, until=None, cursor=0, batch_size=10):
    """Return a list of Headers with the given parameters from the specified OAI-PMH server.

    NOTE(review): `cursor` and `batch_size` are accepted but never used
    here — every page is fetched and all headers are returned.
    """
    args = {'verb': "ListIdentifiers",
            'metadataPrefix': metadataPrefix
            }
    if set is not None:
        args['set'] = set
    if from_ is not None:
        args['from'] = str(from_)
    if until is not None:
        args['until'] = str(until)
    params = urllib.urlencode(args)
    url = "{0}?{1}".format(baseUrl, params)
    data = fetch_data(url)
    headers = []
    while data is not None:
        try:
            tree = etree.fromstring(data)
        except:
            # Dump the URL and raw payload for debugging, then re-raise.
            sys.stderr.write(url + '\n')
            sys.stderr.write(data + '\n')
            sys.stderr.flush()
            raise
        for h in tree.xpath('//oai:header', namespaces={'oai': NS_OAIPMH}):
            headers.append(headerFromLxmlElement(h))
        # Follow the resumptionToken chain until the server stops issuing one.
        resTok = tree.xpath('string(//oai:resumptionToken)', namespaces={'oai': NS_OAIPMH})
        if resTok:
            params = urllib.urlencode({'verb': "ListIdentifiers",
                                       'resumptionToken': resTok})
            url = "{0}?{1}".format(baseUrl, params)
            data = fetch_data(url)
        else:
            break
    return headers
def listRecords(baseUrl, metadataPrefix, set=None, from_=None, until=None, cursor=0, batch_size=10):
    """Return a list of (Header, metadata, about) tuples for records which match the given parameters from the specified OAI-PMH server.

    Skips the first `cursor` records and returns at most `batch_size`.
    """
    args = {'verb': "ListRecords",
            'metadataPrefix': metadataPrefix
            }
    if set is not None:
        args['set'] = set
    if from_ is not None:
        args['from'] = str(from_)
    if until is not None:
        args['until'] = str(until)
    params = urllib.urlencode(args)
    url = "{0}?{1}".format(baseUrl, params)
    data = fetch_data(url)
    records = []
    i = 0
    while (data is not None):
        try:
            tree = etree.fromstring(data)
        except:
            # Consistency: mirror listIdentifiers()'s stderr dump (the
            # original used Python-2-only `print url` statements here).
            sys.stderr.write(url + '\n')
            sys.stderr.write(data + '\n')
            sys.stderr.flush()
            raise
        for recEl in tree.xpath('//oai:record', namespaces={'oai': NS_OAIPMH}):
            if i < cursor:
                # Skip records before the requested cursor position.
                i += 1
                continue
            hEl = recEl.xpath('//oai:header', namespaces={'oai': NS_OAIPMH})[0]
            header = headerFromLxmlElement(hEl)
            mdEl = recEl.xpath('//oai:metadata/*', namespaces={'oai': NS_OAIPMH})[0]
            recString = etree.tostring(mdEl)
            rec = LxmlRecord(mdEl, xml=recString, docId=header.identifier(), byteCount=len(recString))
            records.append((header, rec, None))
            i += 1
            # Bug fix: the original tested/returned the undefined name
            # `headers` (a NameError once a full batch accumulated); it
            # meant `records`.
            if (len(records) >= batch_size):
                return records
        resTok = tree.xpath('string(//oai:resumptionToken)', namespaces={'oai': NS_OAIPMH})
        if resTok:
            data = fetch_data(url + '&resumptionToken=' + cgi_encode(resTok))
        else:
            break
    return records
"info@cheshire3.org"
] | info@cheshire3.org |
7c1a3cc65ce60f6935f521faff84138422688e97 | 51f2492a5c207e3664de8f6b2d54bb93e313ca63 | /codejam/2018-qualification/b-gen.py | ab3085506c3ea30bcf7c2b081167f31b1478bfea | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abeaumont/competitive-programming | 23c5aabd587d7bb15a61efd3428838cb934233dd | a24c9b89941a59d344b51dc1010de66522b1a0dd | refs/heads/master | 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 | WTFPL | 2023-07-12T17:36:20 | 2018-01-15T20:00:56 | C++ | UTF-8 | Python | false | false | 297 | py | #!/usr/bin/env python3
# https://codejam.withgoogle.com/2018/challenges/00000000000000cb/dashboard/00000000000079cb
# Input generator
import random
# Emit 100 random test cases: each case is N in [3, 100] on one line,
# followed by N space-separated values in [0, 100] on the next.
t = 100
print(t)
for i in range(t):
    n = random.randint(3, 100)
    print(n)
    print(' '.join([str(random.randint(0, 100)) for j in range(n)]))
| [
"alfredo.beaumont@gmail.com"
] | alfredo.beaumont@gmail.com |
a906710b0674f1b2a9c5ead838e6511aa3907a37 | c3e872e0d019da55317b0ef5993f66bde8a6c021 | /model/ppy.py | 9d7719c09d775447ae061f2678a63aae17fe6428 | [] | no_license | liyonghelpme/wanderEmpire | 0a10b4bbf61b89e4e4eeefc59ad794d02682684b | dea41cbabe04edd99c0412ebbc3f5d0f45142f09 | refs/heads/master | 2016-09-06T06:27:09.366029 | 2011-09-23T14:25:17 | 2011-09-23T14:25:17 | 2,444,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from sqlalchemy import Table, Column
class Papayafriend(object):
    """Plain data holder linking a local user (uid) to a Papaya friend."""
    def __init__(self, uid, papayaid, lev, user_kind):
        self.uid = uid
        self.papayaid = papayaid
        # Bug fix: the original assigned `self.lev = self` (the instance
        # itself) instead of the `lev` argument.
        self.lev = lev
        self.user_kind = user_kind
"liyonghelpme@gmail.com"
] | liyonghelpme@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.