blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
629ad7161947983e30003f917353693a04574f14
|
ad1d46b4ec75ef1f00520ff246d0706c6bb7770e
|
/content/chapters/how-to-browse-sequences/25.py
|
e0850b41997e10de7e8f2f2fdf52af7e4fd0a553
|
[] |
no_license
|
roberto-arista/PythonForDesigners
|
036f69bae73095b6f49254255fc473a8ab7ee7bb
|
1a781ea7c7ee21e9c64771ba3bf5634ad550692c
|
refs/heads/master
| 2022-02-24T15:28:04.167558
| 2021-09-07T10:37:01
| 2021-09-07T10:37:01
| 168,937,263
| 103
| 37
| null | 2022-02-11T02:24:01
| 2019-02-03T11:17:51
|
Python
|
UTF-8
|
Python
| false
| false
| 234
|
py
|
# Draw a stack of evenly spaced horizontal rules on a 100x100 pt canvas.
total_lines = 4
newPage(100, 100)
stroke(0)
for line_number in range(total_lines):
    # the width() function provides the canvas width in pts
    y = width() / (total_lines + 1) * (line_number + 1)
    line((20, y), (80, y))
|
[
"arista.rob@gmail.com"
] |
arista.rob@gmail.com
|
4ba530650c4721fb280087ccbc644b3749bde818
|
f159aeec3408fe36a9376c50ebb42a9174d89959
|
/908.Smallest-Range-I.py
|
3508da6f00d256b33a6feb475eb1f2fc769042a6
|
[
"MIT"
] |
permissive
|
mickey0524/leetcode
|
83b2d11ab226fad5da7198bb37eeedcd8d17635a
|
fc5b1744af7be93f4dd01d6ad58d2bd12f7ed33f
|
refs/heads/master
| 2023-09-04T00:01:13.138858
| 2023-08-27T07:43:53
| 2023-08-27T07:43:53
| 140,945,128
| 27
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# https://leetcode.com/problems/monotonic-array/description/
#
# algorithms
# Easy (65.12%)
# Total Accepted: 2.8k
# Total Submissions: 4.3k
class Solution(object):
    """Solver for LeetCode 908 (Smallest Range I)."""

    def smallestRangeI(self, A, K):
        """
        :type A: List[int]
        :type K: int
        :rtype: int
        """
        # A single element can always be shifted onto any target value.
        if len(A) == 1:
            return 0
        # Each element may move by at most K in either direction, so the
        # spread between max and min can shrink by at most 2 * K.
        spread = max(A) - min(A)
        shrink = 2 * K
        return spread - shrink if spread > shrink else 0
|
[
"buptbh@163.com"
] |
buptbh@163.com
|
fa3751191ddf11b217c6eb670413a9adf50781c4
|
cb61ba31b27b232ebc8c802d7ca40c72bcdfe152
|
/leetcode/MaxSubArray/solution.py
|
bce136e8c8ff078608c00208d22aac159de2eb60
|
[
"Apache-2.0"
] |
permissive
|
saisankargochhayat/algo_quest
|
c7c48187c76b5cd7c2ec3f0557432606e9096241
|
a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc
|
refs/heads/master
| 2021-07-04T15:21:33.606174
| 2021-02-07T23:42:43
| 2021-02-07T23:42:43
| 67,831,927
| 5
| 1
|
Apache-2.0
| 2019-10-28T03:51:03
| 2016-09-09T20:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,926
|
py
|
# https://leetcode.com/problems/maximum-subarray/
class Solution:
    """Maximum-subarray solver using the divide-and-conquer scheme (CLRS ch. 4)."""

    def find_max_subarray(self, A, low, high):
        """Return (start, end, sum) of a maximum subarray of A[low..high], inclusive."""
        if high == low:
            # Base case: a single element is its own maximum subarray.
            return (low, high, A[low])
        mid = (low + high) // 2
        # The best subarray lies entirely left of mid, entirely right of
        # mid, or crosses the midpoint; take the best of the three.
        left_low, left_high, left_sum = self.find_max_subarray(A, low, mid)
        right_low, right_high, right_sum = self.find_max_subarray(A, mid + 1, high)
        cross_low, cross_high, cross_sum = self.find_max_crossing_subarray(A, low, mid, high)
        if left_sum >= right_sum and left_sum >= cross_sum:
            return (left_low, left_high, left_sum)
        if right_sum >= left_sum and right_sum >= cross_sum:
            return (right_low, right_high, right_sum)
        return (cross_low, cross_high, cross_sum)

    def find_max_crossing_subarray(self, A, low, mid, high):
        """Return (start, end, sum) of the best subarray spanning the mid/mid+1 boundary."""
        # Bug fix: `math` was never imported at module scope, so the original
        # raised NameError at runtime; import locally to keep the class
        # self-contained.
        import math
        left_sum = -math.inf
        running = 0
        max_left, max_right = 0, 0
        # Grow leftwards from mid, keeping the best prefix ending at mid.
        for i in range(mid, low - 1, -1):
            running += A[i]
            if running > left_sum:
                left_sum = running
                max_left = i
        right_sum = -math.inf
        running = 0
        # Grow rightwards from mid+1, keeping the best prefix starting there.
        for j in range(mid + 1, high + 1):
            running += A[j]
            if running > right_sum:
                right_sum = running
                max_right = j
        return (max_left, max_right, left_sum + right_sum)

    def maxSubArray(self, nums: 'List[int]') -> int:
        """Return the largest sum of any contiguous subarray of nums.

        Uses the O(n log n) divide-and-conquer approach above.  The
        annotation is quoted because `List` is not imported in this module;
        an unquoted annotation would raise NameError at definition time.
        """
        max_left, max_right, max_sum = self.find_max_subarray(nums, 0, len(nums) - 1)
        return max_sum
|
[
"saisankargochhayat@gmail.com"
] |
saisankargochhayat@gmail.com
|
2dd0237030d554f35a13f1533f56214f68389736
|
a3d72c9d47a3711ff1a7213da25bacdcb3a7aa32
|
/stickerfinder/models/inline_query.py
|
230faf23540fc4f4ecebb19740a86738467102a4
|
[
"MIT"
] |
permissive
|
crashcoredump/sticker-finder
|
225a46c586d1b2b8764cf325e296186cbece5edd
|
8158724ebc3e8346012d0ede05a75bb8f9f5f7eb
|
refs/heads/master
| 2020-08-26T23:28:56.991893
| 2019-10-23T22:34:58
| 2019-10-23T22:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
"""The sqlite model for a inline search."""
from sqlalchemy.orm import relationship
from sqlalchemy import (
Column,
func,
ForeignKey,
)
from sqlalchemy.types import (
BigInteger,
DateTime,
String,
)
from stickerfinder.db import base
from stickerfinder.config import config
class InlineQuery(base):
    """The model for a inline search.

    One row per inline search performed by a Telegram user; links the user,
    the sticker that was eventually chosen (if any) and the paged requests
    belonging to this search.
    """
    # Result modes: searching for whole sticker sets vs. single stickers.
    SET_MODE = 'sticker_set'
    STICKER_MODE = 'sticker'
    __tablename__ = 'inline_query'
    id = Column(BigInteger, primary_key=True)
    # Raw query text as typed by the user.
    query = Column(String, nullable=False)
    mode = Column(String, nullable=False, default='sticker')
    created_at = Column(DateTime, server_default=func.now(), nullable=False)
    user_id = Column(BigInteger, ForeignKey('user.id'), index=True)
    # Cascading FK keeps this row consistent on sticker renames/deletes.
    sticker_file_id = Column(String, ForeignKey('sticker.file_id',
                             onupdate='cascade',
                             ondelete='cascade'), index=True)
    user = relationship("User")
    sticker = relationship("Sticker")
    # Requests ordered oldest-first.
    requests = relationship("InlineQueryRequest",
                            order_by="asc(InlineQueryRequest.created_at)")
    def __init__(self, query, user):
        """Create a new change."""
        self.query = query
        self.user = user
        # NOTE(review): `bot` is not declared as a Column above — presumably
        # a transient attribute only; confirm before relying on persistence.
        self.bot = config['telegram']['bot_name']
    def __repr__(self):
        """Print as string."""
        return f'InlineQuery: {self.query}, user: {self.user_id}'
    @staticmethod
    def get_or_create(session, query_id, query, user):
        """Get or create the InlineQuery.

        With a truthy query_id, fetch that existing row (may return None if
        the id is unknown); otherwise create and commit a fresh InlineQuery.
        """
        if query_id:
            # An id was supplied: reuse the already-stored InlineQuery
            # (e.g. an offset/pagination request for the same search).
            inline_query = session.query(InlineQuery).get(query_id)
        else:
            # No id yet: persist this new inline search.
            inline_query = InlineQuery(query, user)
            session.add(inline_query)
            session.commit()
        return inline_query
|
[
"arne@twobeer.de"
] |
arne@twobeer.de
|
56ff8207e78b231e1fea0c024582ba1d89105dca
|
2b86301d5ad3fecaa5a300cabfe6b4dfc82b78ed
|
/venv/Lib/site-packages/adodbapi/process_connect_string.py
|
a8dab5a10443ed24537ce76705a9c1de02f5ef71
|
[
"MIT",
"LGPL-2.1-only",
"LGPL-2.0-or-later"
] |
permissive
|
sserrot/champion_relationships
|
72823bbe73e15973007e032470d7efdf72af3be0
|
91315d6b7f6e7e678d9f8083b4b3e63574e97d2b
|
refs/heads/master
| 2022-12-21T05:15:36.780768
| 2021-12-05T15:19:09
| 2021-12-05T15:19:09
| 71,414,425
| 1
| 2
|
MIT
| 2022-12-18T07:42:59
| 2016-10-20T01:35:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,376
|
py
|
""" a clumsy attempt at a macro language to let the programmer execute code on the server (ex: determine 64bit)"""
from . import is64bit as is64bit
def macro_call(macro_name, args, kwargs):
    """ allow the programmer to perform limited processing on the server by passing macro names and args

    :new_key - the key name the macro will create
    :args[0] - macro name
    :args[1:] - any arguments
    :code - the value of the keyword item
    :kwargs - the connection keyword dictionary. ??key has been removed
    --> the value to put in for kwargs['name'] = value

    Any failure (including an unknown macro name) is re-raised as a
    ValueError naming the macro and its arguments, with the original
    exception chained as the cause.
    """
    # Bug fix: was `isinstance(args, (str, str))` — a duplicated type.
    if isinstance(args, str):
        args = [args]  # the user forgot to pass a sequence, so make a string into args[0]
    new_key = args[0]
    try:
        if macro_name == "is64bit":
            if is64bit.Python():  # if on 64 bit Python
                return new_key, args[1]  # return first argument
            try:
                return new_key, args[2]  # else return second argument (if defined)
            except IndexError:
                return new_key, ''  # else return blank
        elif macro_name == "getuser":  # get the name of the user the server is logged in under
            if new_key not in kwargs:
                import getpass
                return new_key, getpass.getuser()
        elif macro_name == "getnode":  # get the name of the computer running the server
            import platform
            try:
                return new_key, args[1] % platform.node()
            except IndexError:
                return new_key, platform.node()
        elif macro_name == "getenv":  # expand the server's environment variable args[1]
            # Bug fix: `os` was never imported, so this branch raised NameError.
            import os
            try:
                dflt = args[2]  # if not found, default from args[2]
            except IndexError:  # or blank
                dflt = ''
            return new_key, os.environ.get(args[1], dflt)
        elif macro_name == "auto_security":
            if 'user' not in kwargs or not kwargs['user']:  # missing, blank, or Null username
                return new_key, 'Integrated Security=SSPI'
            return new_key, 'User ID=%(user)s; Password=%(password)s' % kwargs
        elif macro_name == "find_temp_test_path":  # helper function for testing ado operation -- undocumented
            import os
            import tempfile
            return new_key, os.path.join(tempfile.gettempdir(), 'adodbapi_test', args[1])
        # falls through here for unknown macros (and for "getuser" when the
        # key already exists) — wrapped by the handler below.
        raise ValueError('Unknown connect string macro=%s' % macro_name)
    except Exception as exc:
        # Narrowed from a bare `except:`; chain the root cause for debugging.
        raise ValueError('Error in macro processing %s %s' % (macro_name, repr(args))) from exc
def process(args, kwargs, expand_macros=False):  # --> connection string with keyword arguments processed.
    """Fold positional .connect() arguments into the keyword dictionary.

    args[0] may be a connection string or a django-style settings dict;
    args[1] may be a timeout (int), a user name (str) or extra keywords
    (dict); args[2:5] map to password, host and database.  Guarantees
    kwargs['connection_string'] exists (falling back to 'dsn', then 'host'),
    optionally expands 'macro_*' entries via macro_call, and brackets a raw
    IPv6 proxy_host.  Returns the updated kwargs; raises TypeError when no
    connection string can be derived.
    """
    first = args[0] if len(args) > 0 else None
    if isinstance(first, dict):
        # as a convenience the first argument may be django settings
        kwargs.update(first)
    elif first:
        # the connection string is passed to the connection as part of the keyword dictionary
        kwargs['connection_string'] = first
    second = args[1] if len(args) > 1 else None
    if isinstance(second, int):
        # historically, the second positional argument might be a timeout value
        kwargs['timeout'] = second
    elif isinstance(second, str):
        # if the second positional argument is a string, then it is user
        kwargs['user'] = second
    elif isinstance(second, dict):
        # if the second positional argument is a dictionary, use it as keyword arguments, too
        kwargs.update(second)
    # remaining positionals map to fixed keywords; stop at the first absent one
    for index, key in ((2, 'password'), (3, 'host'), (4, 'database')):
        if index >= len(args):
            break
        kwargs[key] = args[index]
    # make sure connection string is defined somehow
    if 'connection_string' not in kwargs:
        if 'dsn' in kwargs:  # perhaps 'dsn' was defined
            kwargs['connection_string'] = kwargs['dsn']
        elif 'host' in kwargs:  # as a last effort, use the "host" keyword
            kwargs['connection_string'] = kwargs['host']
        else:
            raise TypeError("Must define 'connection_string' for ado connections")
    if expand_macros:
        for kwarg in list(kwargs.keys()):
            if kwarg.startswith('macro_'):  # If a key defines a macro
                macro_name = kwarg[6:]  # name without the "macro_"
                macro_code = kwargs.pop(kwarg)  # we remove the macro_key and get the code to execute
                new_key, result = macro_call(macro_name, macro_code, kwargs)  # run the code in the local context
                kwargs[new_key] = result  # put the result back in the keywords dict
    # special processing for PyRO IPv6 host address
    if 'proxy_host' in kwargs:
        host_value = kwargs['proxy_host']
        if ':' in host_value and not host_value.startswith('['):
            # it is an IPv6 address not yet surrounded by brackets
            kwargs['proxy_host'] = '[%s]' % host_value
    return kwargs
|
[
"sserrot@users.noreply.github.com"
] |
sserrot@users.noreply.github.com
|
49e6d7da11e749a220fab3a24a443fe317ea54ab
|
6e1ea8d65052f025060453f66819ac446a7592d8
|
/archive/neg_190406_3.py
|
e062dc3d131ae0f2f2565440c8ac7a7146698d5d
|
[] |
no_license
|
csJd/oj-codes
|
bc38b79b9227b45d34139c84f9ef1e830f6b996c
|
2bd65d0070adc3400ee5bee8d1cf02a038de540b
|
refs/heads/master
| 2022-03-21T20:21:40.153833
| 2019-11-28T13:11:54
| 2019-11-28T13:11:54
| 47,266,594
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
"""
题目描述:n 个 电池, m 条电线 (电线是单向的!) , 电源连着电池 b, 电源功率为 e
某个电池充满电后通向其的功率会平均分往所连的电池
输入 n 个电池的容量
输出每个电池充满电所需时间
2
6 6 1 2
4 2 4 2 2 20
1 2
1 3
2 4
3 5
4 6
5 6
2 0 2 1
10 10
2.0000 4.0000 6.0000 6.0000 8.0000 17.0000
-1.0000 10.0000
"""
# Simulation: n batteries, m one-way wires, a power source feeding battery b
# at rate e.  Once a battery is full, its incoming power is split evenly
# among the batteries it connects to.  Prints per battery the time until it
# is full, or -1 if it never fills.
T = int(input())
for case in range(T):
    # n batteries, m wires, source battery b (1-based), source power e
    n, m, b, e = map(int, input().split())
    remain = list(map(int, input().split()))  # remaining capacity per battery
    time_spent = [0 for i in range(n)]  # elapsed time accounted per battery
    edge = [[] for i in range(n)]  # adjacency list of directed wires
    for k in range(m):
        st, ed = map(int, input().split())
        st -= 1
        ed -= 1
        edge[st].append(ed)
        # edge[ed].append(st)
    b -= 1
    # batteries currently receiving power -> incoming power rate
    charging = {b: e}
    while len(charging) > 0:
        # charging is kept sorted by time-to-full, so the first key fills next
        pos = list(charging.keys())[0]
        power = charging.pop(pos)
        charge_time = remain[pos] / power
        time_spent[pos] += charge_time
        remain[pos] = 0
        # advance every other unfinished battery by the elapsed interval
        for ind in range(n):
            if remain[ind] == 0:
                continue
            time_spent[ind] += charge_time
            if ind in charging:
                # NOTE(review): this reassigns `power`, which is reused below
                # when splitting pos's inflow — looks unintended; confirm.
                power = charging[ind]
                remain[ind] -= power * charge_time
        if len(edge[pos]) == 0:
            continue
        # split the inflow of the battery that just filled among its targets
        add_power = power / len(edge[pos])
        for ed in edge[pos]:
            if ed not in charging:
                charging[ed] = add_power
            else:
                # NOTE(review): uses `ind` (leftover loop variable) rather
                # than `ed` — presumably a bug; verify against the sample
                # output in the module docstring.
                charging.pop(ind)
                charging[ind] = power + add_power
        # re-sort so the battery that will finish soonest comes first
        charging = dict(
            sorted(charging.items(), key=lambda tup: remain[tup[0]]/tup[1]))
    ret = []
    for i in range(n):
        if remain[i] == 0:
            ret.append(time_spent[i])
        else:
            ret.append(-1)  # never received any power
    print(*ret)
|
[
"d.vay@qq.com"
] |
d.vay@qq.com
|
c22785ef3b2a5baca3fdb90c053ddeda973ccced
|
c8371b410f19dc87059bbe0a28e983c3cfe0f4f8
|
/src/etheroll/settings.py
|
8e2e0292db8743d6e368ca46c0f468eb6156dac1
|
[
"MIT"
] |
permissive
|
homdx/EtherollApp
|
c70e37cff4fbbde8c605a8ca87776535185a7167
|
4953ce0f10ac58d43517fbc3a18bc5ed43297858
|
refs/heads/master
| 2020-03-28T19:05:10.591229
| 2018-09-30T21:25:32
| 2018-09-30T21:25:32
| 148,942,827
| 0
| 0
|
MIT
| 2018-09-15T21:52:51
| 2018-09-15T21:52:51
| null |
UTF-8
|
Python
| false
| false
| 2,576
|
py
|
from etheroll.store import Store
from etheroll.utils import SubScreen, load_kv_from_py
from pyetheroll.constants import DEFAULT_GAS_PRICE_GWEI, ChainID
# Load the .kv layout matching this module (import-time side effect).
load_kv_from_py(__file__)
class SettingsScreen(SubScreen):
    """
    Screen for configuring network, gas price...
    """

    def __init__(self, **kwargs):
        """Plain pass-through constructor."""
        super(SettingsScreen, self).__init__(**kwargs)

    def store_network(self):
        """
        Saves selected network to the store.
        """
        Store.get_store().put('network', value=self.get_ui_network().name)

    def store_gas_price(self):
        """
        Saves gas price value to the store.
        """
        Store.get_store().put('gas_price', value=self.get_ui_gas_price())

    def store_settings(self):
        """
        Stores settings to json store.
        """
        self.store_gas_price()
        self.store_network()

    def get_ui_network(self):
        """
        Retrieves network values from UI.
        """
        return ChainID.MAINNET if self.is_ui_mainnet() else ChainID.ROPSTEN

    def is_ui_mainnet(self):
        """True when the mainnet checkbox is ticked."""
        return self.ids.mainnet_checkbox_id.active

    def is_ui_testnet(self):
        """True when the testnet checkbox is ticked."""
        return self.ids.testnet_checkbox_id.active

    @staticmethod
    def get_stored_network():
        """
        Retrieves last stored network value, defaults to Mainnet.
        """
        store = Store.get_store()
        try:
            stored = store['network']
        except KeyError:
            stored = {}
        return ChainID[stored.get('value', ChainID.MAINNET.name)]

    @classmethod
    def is_stored_mainnet(cls):
        """True when the persisted network is mainnet."""
        return cls.get_stored_network() == ChainID.MAINNET

    @classmethod
    def is_stored_testnet(cls):
        """True when the persisted network is testnet."""
        return cls.get_stored_network() == ChainID.ROPSTEN

    def get_ui_gas_price(self):
        """Reads the gas price from the slider widget."""
        return self.ids.gas_price_slider_id.value

    @staticmethod
    def get_stored_gas_price():
        """
        Retrieves stored gas price value, defaults to DEFAULT_GAS_PRICE_GWEI.
        """
        store = Store.get_store()
        try:
            stored = store['gas_price']
        except KeyError:
            stored = {}
        return stored.get('value', DEFAULT_GAS_PRICE_GWEI)
|
[
"andre.miras@gmail.com"
] |
andre.miras@gmail.com
|
5197c2424a9a662ba7a7350d9247e1a5e7ccc646
|
e2d23d749779ed79472a961d2ab529eeffa0b5b0
|
/gcloud/commons/template/apps.py
|
afc2887f420be79a219cef1a5aafb3839402098a
|
[
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
manlucas/atom
|
9fa026b3f914e53cd2d34aecdae580bda09adda7
|
94963fc6fdfd0568473ee68e9d1631f421265359
|
refs/heads/master
| 2022-09-30T06:19:53.828308
| 2020-01-21T14:08:36
| 2020-01-21T14:08:36
| 235,356,376
| 0
| 0
|
NOASSERTION
| 2022-09-16T18:17:08
| 2020-01-21T14:04:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.apps import AppConfig
class CommonTemplateConfig(AppConfig):
    """Django app config for the gcloud common template app."""
    name = 'gcloud.commons.template'
    verbose_name = 'GcloudCommonTemplate'
    def ready(self):
        """Hook run once the app registry is ready."""
        # Imported solely for its permission-registration side effect.
        from gcloud.commons.template.permissions import common_template_resource  # noqa
|
[
"lucaswang@canway.net"
] |
lucaswang@canway.net
|
e39396344dfa389cd482fb0f09a82d0ffbd041e9
|
930a868ae9bbf85df151b3f54d04df3a56bcb840
|
/benchmark/union_find_decoder/atomic_qubit_model/perfect_measurement/run_experiment.py
|
4f1ba3312e3746483f2566fb68d9402c43a492e5
|
[
"MIT"
] |
permissive
|
yuewuo/QEC-Playground
|
1148f3c5f4035c069986d8b4103acf7f1e34f9d4
|
462208458cdf9dc8a33d4553a560f8a16c00e559
|
refs/heads/main
| 2023-08-10T13:05:36.617858
| 2023-07-22T23:48:49
| 2023-07-22T23:48:49
| 312,809,760
| 16
| 1
|
MIT
| 2023-07-22T23:48:51
| 2020-11-14T12:10:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,372
|
py
|
import os, sys
import subprocess, sys
# Resolve the repository root through git so the script works from any cwd.
qec_playground_root_dir = subprocess.run("git rev-parse --show-toplevel", cwd=os.path.dirname(os.path.abspath(__file__)), shell=True, check=True, capture_output=True).stdout.decode(sys.stdout.encoding).strip(" \r\n")
rust_dir = os.path.join(qec_playground_root_dir, "backend", "rust")
fault_toleran_MWPM_dir = os.path.join(qec_playground_root_dir, "benchmark", "fault_tolerant_MWPM")
sys.path.insert(0, fault_toleran_MWPM_dir)
from automated_threshold_evaluation import qec_playground_fault_tolerant_MWPM_simulator_runner_vec_command
from automated_threshold_evaluation import run_qec_playground_command_get_stdout
# Reference invocations of the Rust benchmark for each code distance:
# RUST_BACKTRACE=full cargo run --release -- tool fault_tolerant_benchmark [3] [0] [5e-1,2e-1,1e-1,5e-2,2e-2,1e-2,5e-3,2e-3,1e-3,5e-4,2e-4,1e-4,5e-5,2e-5,1e-5] -p0-m100000000 --shallow_error_on_bottom --decoder UF --max_half_weight 10
# RUST_BACKTRACE=full cargo run --release -- tool fault_tolerant_benchmark [5] [0] [5e-1,2e-1,1e-1,5e-2,2e-2,1e-2,5e-3,2e-3] -p0-m100000000 --shallow_error_on_bottom --decoder UF --max_half_weight 10
# RUST_BACKTRACE=full cargo run --release -- tool fault_tolerant_benchmark [7] [0] [5e-1,2e-1,1e-1,5e-2,2e-2,1e-2,5e-3,2e-3] -p0-m100000000 --shallow_error_on_bottom -e1000 --decoder UF --max_half_weight 10
# RUST_BACKTRACE=full cargo run --release -- tool fault_tolerant_benchmark [9] [0] [5e-1,2e-1,1e-1,5e-2,2e-2,1e-2,5e-3,2e-3] -p0-m100000000 --shallow_error_on_bottom -e1000 --decoder UF --max_half_weight 10
# RUST_BACKTRACE=full cargo run --release -- tool fault_tolerant_benchmark [11] [0] [5e-1,2e-1,1e-1,5e-2,2e-2,1e-2,5e-3,2e-3] -p0-m100000000 --shallow_error_on_bottom -e1000 --decoder UF --max_half_weight 10
# RUST_BACKTRACE=full cargo run --release -- tool fault_tolerant_benchmark [13] [0] [5e-1,2e-1,1e-1,5e-2,2e-2,1e-2,5e-3,2e-3] -p0-m100000000 --shallow_error_on_bottom -e1000 --decoder UF --max_half_weight 10
di_vec = [3, 5, 7, 9, 11, 13]  # code distances to sweep
p_vec = [0.5 * (10 ** (- i / 10)) for i in range(10 * 2 + 1)]  # 21 log-spaced rates, 0.5 downwards
print(p_vec)
min_error_cases = 1000
# debug configurations
# di_vec = [3, 5]
# p_vec = [0.5 * (10 ** (- i / 3)) for i in range(3)]
# min_error_cases = 100
max_N = 100000000
UF_parameters = f"-p0 --shallow_error_on_bottom --decoder UF --max_half_weight 10 --time_budget 3600".split(" ")
results = []
for di in di_vec:
    for p in p_vec:
        # Split the total rate p into 5% Pauli and 95% erasure errors.
        p_pauli = p * 0.05
        p_erasure = p * 0.95
        UF_command = qec_playground_fault_tolerant_MWPM_simulator_runner_vec_command([p_pauli], [di], [di], [0], UF_parameters + ["--pes", f"[{p_erasure}]"], max_N=max_N, min_error_cases=min_error_cases)
        print(" ".join(UF_command))
        # run experiment
        stdout, returncode = run_qec_playground_command_get_stdout(UF_command)
        print("\n" + stdout)
        assert returncode == 0, "command fails..."
        # full result: the last line of the benchmark's stdout
        full_result = stdout.strip(" \r\n").split("\n")[-1]
        lst = full_result.split(" ")
        # NOTE(review): columns 4/5/7 presumably hold error count, error
        # rate and confidence interval — confirm against the Rust output.
        error_count = int(lst[4])
        error_rate = float(lst[5])
        confidence_interval = float(lst[7])
        # record result, prefixed with the total physical error rate p
        print_result = f"{p} " + full_result
        results.append(print_result)
        print(print_result)
        if error_count < 100:
            break # next is not trust-worthy, ignore every p behind it
    results.append("")  # blank separator between code distances
print("\n\n")
print("\n".join(results))
|
[
"yue.wu@yale.edu"
] |
yue.wu@yale.edu
|
be0b75475d921e2b3150d46ad352382e2d5dc7bb
|
dfc57c864598d8c6cf4a438fdf683722b8bdfa97
|
/datastax_cassandra_deploy/utils.py
|
fcb6c03f5d309032d32f092db183ad5a18d0f545
|
[
"Apache-2.0"
] |
permissive
|
ownport/datastax-cassandra-deploy
|
9432ee7ffafb50feef066fe493d8876b9da4beb9
|
5eeb24f09ea7ae63f6234c75279f3592b4557400
|
refs/heads/master
| 2021-06-17T15:41:52.785970
| 2019-06-25T19:34:33
| 2019-06-25T19:34:33
| 191,298,874
| 1
| 0
|
Apache-2.0
| 2021-03-25T22:40:53
| 2019-06-11T05:18:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
import json
import yaml
import logging
logger = logging.getLogger(__name__)
def load_deployment(deployments):
    ''' Merge a list of deployment YAML files into a single dict.

    reused from https://github.com/ansible/ansible/blob/devel/lib/ansible/utils/vars.py
    and modified according to DataStax Cassandra deployment requirements

    Later files override earlier keys.  Missing files and YAML parse errors
    are logged and skipped; non-dict documents are ignored.
    '''
    result = {}
    if not deployments:
        return result
    for deployment_file in deployments:
        # Bug fix: must be initialized here — previously a parse error left
        # `deployment` unbound, crashing the `if deployment` check below.
        deployment = None
        # Argument is a YAML file (JSON is a subset of YAML)
        try:
            with open(deployment_file, 'r', encoding='utf8') as source:
                try:
                    deployment = yaml.load(source, Loader=yaml.FullLoader)
                except yaml.YAMLError as err:
                    # Bug fix: referenced the undefined name `_vars`
                    # (NameError); log the offending file instead.
                    logger.error('{}, {}'.format(err, deployment_file))
        except FileNotFoundError as err:
            logger.error(err)
            continue
        if deployment and isinstance(deployment, dict):
            result.update(deployment)
    return result
def pretty_json(data):
    ''' serialize data to an indented, key-sorted JSON string
    '''
    formatted = json.dumps(data, indent=4, sort_keys=True)
    return formatted
def remove_none_values(params):
    ''' return a copy of the dict without None-valued entries;
    non-dict arguments are passed through unchanged
    '''
    if not isinstance(params, dict):
        return params
    return {key: value for key, value in params.items() if value is not None}
def hide_sensetive_fields(params):
    ''' mask sensitive values with '*' (mutates the dict in place);
    non-dict arguments are passed through unchanged
    '''
    if not isinstance(params, dict):
        return params
    sensitive_keys = ('login-password', 'become-password', 'password')
    for key in params:
        if key in sensitive_keys:
            params[key] = '*' * 10
    return params
|
[
"ownport@gmail.com"
] |
ownport@gmail.com
|
ae95c88713e0cb71918e8f7b95f9960e7bfb1160
|
9a063667fb866fc3be52910e8dab53b73c144e7d
|
/forecast/auth/views.py
|
b27f15d7233fb8340c071ad09b54cdd67b27b4c6
|
[
"MIT"
] |
permissive
|
lockefox/ProsperForecast
|
45634904ce5f51628754dba791c414db23189787
|
eccba1ba6e5e2322c20bec65421fb531acdf5f43
|
refs/heads/master
| 2020-04-07T18:03:21.192557
| 2018-11-30T20:58:54
| 2018-11-30T20:58:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,587
|
py
|
from flask import request, jsonify, Blueprint, current_app as app
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_required,
jwt_refresh_token_required,
get_jwt_identity,
get_raw_jwt
)
from forecast.models import User
from forecast.extensions import pwd_context, jwt
from forecast.auth.helpers import (
revoke_token,
is_token_revoked,
add_token_to_database
)
# All auth endpoints are served under the /auth URL prefix.
blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@blueprint.route('/login', methods=['POST'])
def login():
    """Authenticate user and return token

    Expects a JSON body with `username` and `password`; responds 400 on a
    missing body, missing fields or bad credentials, 200 with an access and
    a refresh token on success.
    """
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    username = request.json.get('username', None)
    password = request.json.get('password', None)
    if not username or not password:
        return jsonify({"msg": "Missing username or password"}), 400
    user = User.query.filter_by(username=username).first()
    # Same response for unknown user and wrong password (no user enumeration).
    if user is None or not pwd_context.verify(password, user.password):
        return jsonify({"msg": "Bad credentials"}), 400
    access_token = create_access_token(identity=user.id)
    refresh_token = create_refresh_token(identity=user.id)
    # Persist both tokens so they can be revoked later.
    add_token_to_database(access_token, app.config['JWT_IDENTITY_CLAIM'])
    add_token_to_database(refresh_token, app.config['JWT_IDENTITY_CLAIM'])
    ret = {
        'access_token': access_token,
        'refresh_token': refresh_token
    }
    return jsonify(ret), 200
@blueprint.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
    """Issue a new access token from a valid refresh token."""
    current_user = get_jwt_identity()
    access_token = create_access_token(identity=current_user)
    ret = {
        'access_token': access_token
    }
    add_token_to_database(access_token, app.config['JWT_IDENTITY_CLAIM'])
    return jsonify(ret), 200
@blueprint.route('/revoke_access', methods=['DELETE'])
@jwt_required
def revoke_access_token():
    """Revoke the access token that authorized this request."""
    jti = get_raw_jwt()['jti']
    user_identity = get_jwt_identity()
    revoke_token(jti, user_identity)
    return jsonify({"message": "token revoked"}), 200
@blueprint.route('/revoke_refresh', methods=['DELETE'])
@jwt_refresh_token_required
def revoke_refresh_token():
    """Revoke the refresh token that authorized this request."""
    jti = get_raw_jwt()['jti']
    user_identity = get_jwt_identity()
    revoke_token(jti, user_identity)
    return jsonify({"message": "token revoked"}), 200
@jwt.user_loader_callback_loader
def user_loader_callback(identity):
    """Resolve a JWT identity claim to a User instance."""
    return User.query.get(identity)
@jwt.token_in_blacklist_loader
def check_if_token_revoked(decoded_token):
    """Reject tokens that have been revoked in the database."""
    return is_token_revoked(decoded_token)
|
[
"locke.renard@gmail.com"
] |
locke.renard@gmail.com
|
f3712b6de2d824dba7c3ff61ab63cb05a89d7b24
|
0211c16c9a3785444e5204156adafb53a8d8c93c
|
/src/spaceone/identity/service/domain_owner_service.py
|
01c0bf7823d294377de40e8eb37e3d53e5a2790d
|
[
"Apache-2.0"
] |
permissive
|
pyengine/identity
|
b85eed6513225b20db998c01832bc316a1cd4f24
|
4ab9feb3694851fe41b74ecce75c6f5b4f00549f
|
refs/heads/master
| 2022-12-12T04:44:55.472721
| 2020-09-11T02:42:01
| 2020-09-11T02:42:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
import logging
from spaceone.core.service import *
from spaceone.identity.manager import DomainManager, DomainOwnerManager
from spaceone.identity.manager.domain_secret_manager import DomainSecretManager
from spaceone.identity.model import Domain
#@authentication_handler
#@authorization_handler
@event_handler
class DomainOwnerService(BaseService):
    """CRUD service facade for domain owners.

    Thin wrapper that validates required parameters and delegates every
    operation to DomainOwnerManager.
    """
    def __init__(self, metadata):
        super().__init__(metadata)
        # Manager instance resolved through the service locator.
        self.domain_owner_mgr: DomainOwnerManager = self.locator.get_manager('DomainOwnerManager')
    @transaction
    @check_required(['owner_id', 'password', 'domain_id'])
    def create_owner(self, params):
        """Create a domain owner; requires owner_id, password and domain_id."""
        return self.domain_owner_mgr.create_owner(params)
    @transaction
    @check_required(['owner_id', 'domain_id'])
    def update_owner(self, params):
        """Update an existing owner; requires owner_id and domain_id."""
        return self.domain_owner_mgr.update_owner(params)
    @transaction
    @check_required(['domain_id', 'owner_id'])
    def delete_owner(self, params):
        """Delete the owner identified by domain_id/owner_id; returns nothing."""
        self.domain_owner_mgr.delete_owner(params['domain_id'], params['owner_id'])
    @transaction
    @check_required(['domain_id'])
    def get_owner(self, params):
        """Fetch an owner; owner_id and the `only` projection are optional."""
        return self.domain_owner_mgr.get_owner(params['domain_id'], params.get('owner_id'), params.get('only'))
|
[
"whdalsrnt@megazone.com"
] |
whdalsrnt@megazone.com
|
9892521d3ac95312fdcb0b4127583b854e9f9472
|
b000127408f96db7411f301553585f5da0e426cd
|
/code/Find-Smallest-Letter-Greater-Than-Target.py
|
8aee8cd7459327bce3cf3cb8411503c855d7e7b3
|
[] |
no_license
|
SaiVK/Leetcode-Archive
|
5f758faf97d1ab559c4c75d26ae5cf7a256baef8
|
56bafeaaced2d0fd3b3d2f1a0365d24d5b41e504
|
refs/heads/master
| 2022-11-23T08:50:17.610247
| 2020-07-27T02:09:53
| 2020-07-27T02:09:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
import string
class Solution(object):
    """Solver for LeetCode 744 (Find Smallest Letter Greater Than Target)."""

    def nextGreatestLetter(self, letterList, target):
        """Return the smallest letter in letterList strictly greater than
        target, wrapping around to the smallest letter overall when none is.

        Rewritten from the original tripled-alphabet distance search:
        removes the debug prints, drops the O(n) `list.index` scans per
        candidate, and returns the wrap-around letter (instead of None)
        when every candidate compares <= target.
        """
        ordered = sorted(letterList)
        # First letter strictly greater than target wins.
        for letter in ordered:
            if letter > target:
                return letter
        # All candidates <= target: wrap around to the smallest letter.
        return ordered[0]
|
[
"christopherlambert106@gmail.com"
] |
christopherlambert106@gmail.com
|
2813003b3862939824725bf3c272bb60edbaa9e8
|
c9000e5e30825b29febbefa5ad00da1f57551f8e
|
/03/zhumeichao/test02.py
|
80b502f03b776005dc5eeba1ff236505504d123b
|
[] |
no_license
|
xiaotian1991/actual-10-homework
|
81c58b24f58fc87e4890f1475ad83de8b66ee53b
|
0b379ca6189f843f121df4db5814c83262f9981a
|
refs/heads/master
| 2021-06-12T23:35:52.954510
| 2017-03-24T07:41:18
| 2017-03-24T07:41:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
#!/usr/bin/env python
#coding=utf-8
# NOTE: Python 2 source (print statements, list-returning dict.items()).
# Conversions between strings, lists and dictionaries:
# split a text, count word frequencies, invert the counts, render to HTML.
text="who have touched their lives Love begins with a smile grows with a kiss and ends with a tear The brightest future will always be based on a forgotten past you can’t go on well in lifeuntil you let go of your past failures and heartaches"
# split the string --> list of words
word=text.split(" ")
print word
# count occurrences of each word --> dict {word: count}
d={}
for i in word:
    if i not in d:
        d[i]=1
    else:
        d[i]+=1
print d
# invert the mapping: count --> list of words with that count
dr={}
'''方法一:
for k,v in d.items():
    if v not in dr:
        dr[v]=[k]
    else:
        dr[v].append(k)
'''
# method 2 (setdefault avoids the membership test above):
for k,v in d.items():
    dr.setdefault(v,[])
    dr[v].append(k)
print dr
# reverse the (count, words) pairs --> list
# NOTE(review): [::-1] reverses insertion order, not a sorted order —
# presumably relies on CPython 2 dict ordering here; confirm intent.
print dr.items()[::-1]
# render the table into an HTML file
f = open('tongji.html','w+')
f.write("<html><table style='border:solid 1px'>")
f.write("<th style='border:solid 2px' width=80px>出现次数</th><th style='border:solid 2px'>单词汇总</th>")
# take (count, words) rows from the reversed list
arr=dr.items()[::-1]
for t in arr:
    f.write('<tr><td style="border:solid 1px">%s</td><td style="border:solid 1px">%s</td></tr>' % (t[0],t[1]))
'''
#字典取值
count = 0
while count < 4:
    key = max(dr.keys())
    print "出现了%s次的单词:%s" % (key,dr[key])
    for word in dr[key]:
        f.write('<tr><td style="border:solid 1px">%s</td><td style="border:solid 1px">%s</td></tr>' % (key,word))
    dr.pop(key)
    count = count +1
'''
f.write("</table></html>")
f.close()
|
[
"shengxinjing@addnewer.com"
] |
shengxinjing@addnewer.com
|
64edf9702104dce44c2a1f5a7b4477742e5c33cd
|
6b98594c029605806418d187672f476fde5792b7
|
/sandbox/rocky/tf/spaces/discrete.py
|
13ad009859ccef63e31b6edb60a1316baeaee9ff
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
russellmendonca/GMPS
|
3f65eb250bff008da9beea6b9d1f04aca2d46c6a
|
638087160c48f9f016dc74b0904f8ba2503ea285
|
refs/heads/master
| 2023-01-22T07:53:21.504243
| 2019-10-26T11:23:42
| 2019-10-26T11:23:42
| 178,866,391
| 18
| 8
|
NOASSERTION
| 2023-01-19T06:42:27
| 2019-04-01T13:12:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
from rllab.spaces.base import Space
import numpy as np
from rllab.misc import special
from rllab.misc import ext
import tensorflow as tf
class Discrete(Space):
"""
{0,1,...,n-1}
"""
def __init__(self, n):
self._n = n
@property
def n(self):
return self._n
def sample(self):
return np.random.randint(self.n)
def sample_n(self, n):
return np.random.randint(low=0, high=self.n, size=n)
def contains(self, x):
x = np.asarray(x)
return x.shape == () and x.dtype.kind == 'i' and x >= 0 and x < self.n
def __repr__(self):
return "Discrete(%d)" % self.n
def __eq__(self, other):
return self.n == other.n
def flatten(self, x):
return special.to_onehot(x, self.n)
def unflatten(self, x):
return special.from_onehot(x)
def flatten_n(self, x):
return special.to_onehot_n(x, self.n)
def unflatten_n(self, x):
return special.from_onehot_n(x)
@property
def default_value(self):
return 0
@property
def flat_dim(self):
return self.n
def weighted_sample(self, weights):
return special.weighted_sample(weights, range(self.n))
def new_tensor_variable(self, name, extra_dims):
# needed for safe conversion to float32
return tf.placeholder(dtype=tf.uint8, shape=[None] * extra_dims + [self.flat_dim], name=name)
def __eq__(self, other):
if not isinstance(other, Discrete):
return False
return self.n == other.n
def __hash__(self):
return hash(self.n)
|
[
"russellm@berkeley.edu"
] |
russellm@berkeley.edu
|
016c9a80eb3293fe19613d524785afd10481a1bc
|
3512a534001e2b4dcc10808fee7d57b9ac9f1f13
|
/accounts/views.py
|
9907d95a50e352a839fd4a6446ea54056ea407f4
|
[] |
no_license
|
suryanshtokas/blog-app
|
1adb4521e731ed693d103220c4ec1111c26894a8
|
80784c643e73a67e0bc89ee62e5358b3312a9314
|
refs/heads/master
| 2022-11-21T01:35:59.437795
| 2020-07-16T14:23:28
| 2020-07-16T14:23:28
| 280,171,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
class SignUpView(generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('login')
template_name='signup.html'
# Create your views here.
|
[
"yourname@email.com"
] |
yourname@email.com
|
984dfecf51ecb2edfcee1084f4c5a3dcfe5126f0
|
9eb6528606cf9dd011a3ce0c1605b111c9d50955
|
/python/6-4super函数使用2.py
|
415ba6a7c294f71de06da5708a31d96bf1bab2d8
|
[] |
no_license
|
arch123A/luoye
|
0ca9f787c7d5e9ba89d2ae602528e68d7d31a636
|
ba8e902cefba2c3ccc58bc266cdf9a7eff03a458
|
refs/heads/master
| 2023-02-02T22:47:00.256065
| 2020-12-13T09:30:33
| 2020-12-13T09:30:33
| 104,022,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
print("******多继承使用super().__init__ 发生的状态******")
class Parent(object):
def __init__(self, name, *args, **kwargs): # 为避免多继承报错,使用不定长参数,接受参数
print('parent的init开始被调用')
self.name = name
print('parent的init结束被调用')
class Son1(Parent):
def __init__(self, name, age, *args, **kwargs): # 为避免多继承报错,使用不定长参数,接受参数
print('Son1的init开始被调用')
self.age = age
super().__init__(name, *args, **kwargs) # 为避免多继承报错,使用不定长参数,接受参数
print('Son1的init结束被调用')
class Son2(Parent):
def __init__(self, name, gender, *args, **kwargs): # 为避免多继承报错,使用不定长参数,接受参数
print('Son2的init开始被调用')
self.gender = gender
super().__init__(name, *args, **kwargs) # 为避免多继承报错,使用不定长参数,接受参数
print('Son2的init结束被调用')
class Grandson(Son1, Son2):
def __init__(self, name, age, gender):
print('Grandson的init开始被调用')
# 多继承时,相对于使用类名.__init__方法,要把每个父类全部写一遍
# 而super只用一句话,执行了全部父类的方法,这也是为何多继承需要全部传参的一个原因
# super(Grandson, self).__init__(name, age, gender)
super().__init__(name, age, gender)
print('Grandson的init结束被调用')
print(Grandson.__mro__)
gs = Grandson('grandson', 12, '男')
print('姓名:', gs.name)
print('年龄:', gs.age)
print('性别:', gs.gender)
print("******多继承使用super().__init__ 发生的状态******\n\n")
|
[
"arch@msn.cn"
] |
arch@msn.cn
|
88d24b558542bdd433aa64dcd61afba9c7240541
|
e836eb7ee910c1ca85233c48eadcd49a52bb20ea
|
/Fractals/Mandelbrot.py
|
6e40f7a3084de57394306c1bc84a6b0b921b5a2c
|
[] |
no_license
|
SymmetricChaos/MyOtherMathStuff
|
5d0c47adfaad0e7fb7f0e8736617f15bbac7ed37
|
9dd6920b44658d2faacb54d7120e83ff7de45bf3
|
refs/heads/master
| 2021-06-26T18:34:13.178520
| 2021-01-03T14:22:10
| 2021-01-03T14:22:10
| 196,845,677
| 38
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
import math
import numpy as np
def mandel(n=30,m=30):
imarr = np.zeros((m*3+1,m*3+1))
r = math.floor(m*1.5)
xr = [x / float(m) -0.75 for x in range(-r,r+1,1)]
yr = [x / float(m) for x in range(-r,r+1,1)]
print(len(xr))
for x in range(m*3+1):
for y in range(m*3+1):
a,b = xr[x],yr[y]
for i in range(n+1):
a1 = (a*a)-(b*b) + xr[x]
b1 = 2*(a*b) + yr[y]
if(math.sqrt(a1*a1+b1*b1) > 2):
imarr[x][y] = i
break
a,b = a1,b1
if(i == n):
imarr[x][y] = n
return(imarr)
a = mandel(n=20,m=100)
import matplotlib.pyplot as plt
fig = plt.figure()
fig.set_size_inches(15,15)
ax = plt.Axes(fig,[0.,0.,1.,1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.set_cmap("hot")
ax.imshow(a)
#plt.savefig("Mandelbrot.png",dpi=80)
#import csv
#with open("PythonMandel.csv","w",newline="") as csvfile:
# wtr = csv.writer(csvfile, delimiter = ",")
# wtr.writerow(a)
|
[
"ajfraebel@gmail.com"
] |
ajfraebel@gmail.com
|
9eb39fa7e26c964a1c581ca9bc6c19ae3c5518d4
|
45c170fb0673deece06f3055979ece25c3210380
|
/toontown/coghq/CashbotMintControlRoom_Battle00_Cogs.py
|
8e2affb201f05aaddb5f6cf4666f6853885ade9a
|
[] |
no_license
|
MTTPAM/PublicRelease
|
5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f
|
825f562d5021c65d40115d64523bb850feff6a98
|
refs/heads/master
| 2021-07-24T09:48:32.607518
| 2018-11-13T03:17:53
| 2018-11-13T03:17:53
| 119,129,731
| 2
| 6
| null | 2018-11-07T22:10:10
| 2018-01-27T03:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
#Embedded file name: toontown.coghq.CashbotMintControlRoom_Battle00_Cogs
from toontown.coghq.SpecImports import *
from toontown.toonbase import ToontownGlobals
CogParent = 10000
BattleCellId = 0
BattleCells = {BattleCellId: {'parentEntId': CogParent,
'pos': Point3(0, 0, 0)}}
CogData = [{'parentEntId': CogParent,
'boss': 1,
'level': 14,
'battleCell': BattleCellId,
'pos': Point3(-6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 1},
{'parentEntId': CogParent,
'boss': 0,
'level': 11,
'battleCell': BattleCellId,
'pos': Point3(-2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': 10,
'battleCell': BattleCellId,
'pos': Point3(2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': 10,
'battleCell': BattleCellId,
'pos': Point3(6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0}]
ReserveCogData = []
|
[
"linktlh@gmail.com"
] |
linktlh@gmail.com
|
aa8692f6cc8eabdf8af875a4dae6910929b7849a
|
534570bbb873293bd2646a1567b63d162fbba13c
|
/Python/Data Structure/Linear List/Array/K Sum/18.4-sum.py
|
5c1d0a6f23af7229aebaed7b762696320f35849d
|
[] |
no_license
|
XinheLIU/Coding-Interview
|
fa3df0f7167fb1bc6c8831748249ebaa6f164552
|
d6034c567cef252cfafca697aa316c7ad4e7d128
|
refs/heads/master
| 2022-09-17T14:30:54.371370
| 2022-08-19T15:53:35
| 2022-08-19T15:53:35
| 146,382,499
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
#
# @lc app=leetcode id=18 lang=python3
#
# [18] 4Sum
#
# @lc code=start
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
nums.sort()
res = []
n = len(nums)
for i in range(0, n - 3):
if i and nums[i] == nums[i - 1]:
continue
for j in range(i + 1, n - 2):
if j != i + 1 and nums[j] == nums[j - 1]:
continue
Sum = target - nums[i] - nums[j]
l, r = j + 1, n - 1
while l < r:
if nums[l] + nums[r] == Sum:
res.append([nums[i], nums[j], nums[l], nums[r]])
r -= 1
l += 1
while l < r and nums[l] == nums[l - 1]:
l += 1
while l < r and nums[r] == nums[r + 1]:
r -= 1
elif nums[l] + nums[r] > Sum:
r -= 1
else:
l += 1
return res
# @lc code=end
|
[
"LIUXinhe@outlook.com"
] |
LIUXinhe@outlook.com
|
5b73b192746d5b1228688ae57162a9d58057078d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2248/60690/275776.py
|
ca2ee4bb78146b5668b55fdda0acc54a66584f86
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
n=int(input())
a=int(input())
b=int(input())
num=1
while n>0:
if num%a==0 or num%b==0: n-=1
num+=1
print((num-1)%(10**9+7))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
34ba01a58336d2f71eda9ce17d9bb08ce4174e3f
|
1e30de35480b76f5d00229f5d2fd805bf96ccd9a
|
/ch10_httpd/p5_lab1_config_web_app/webapp.wsgi
|
e535b4a2a1e06a05abcd8cb604bdf8fc0e354507
|
[] |
no_license
|
alexbaltman/RHCE
|
4be7987cb1b58c81d870f93b11d5a7a9d52d40c3
|
b143daeb2d5d92ebb4e2eb92efd2892dc8ed3bab
|
refs/heads/master
| 2021-01-13T14:50:44.837021
| 2017-02-16T03:51:05
| 2017-02-16T03:51:05
| 76,525,209
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
wsgi
|
def application(environ, start_response):
status = '200 OK'
output = 'Hello World!\n'
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
|
[
"aaltman@cisco.com"
] |
aaltman@cisco.com
|
0c428f58dddfce0e8d9c0d84f7a3137bcccc4a8b
|
61166d9797ba949be9ad1b805ae840ff8749e64b
|
/BINARY_SEARCH/binary_search_recursive.py
|
5c2a64976a81160344236a62fe27a90c71a91683
|
[] |
no_license
|
shubhamrocks888/questions
|
216f4f6e8ae6df5a1eb156c2cbf1004b09b1ca88
|
76c98c0e3edccf6552823892058e097b61daa530
|
refs/heads/master
| 2022-12-16T23:07:07.689312
| 2020-09-14T04:45:12
| 2020-09-14T04:45:12
| 284,040,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# RECURSIVE APPROACH:
def binary_search(arr,start,end,n):
mid = (start+end)//2
if start<=end:
if arr[mid]==n:
return "yes"
elif arr[mid]<n:
return binary_search(arr,mid+1,end,n)
else:
return binary_search(arr,start,mid-1,n)
return "no"
arr = [1,2,3,4,5,6,7]
print (binary_search(arr,0,len(arr)-1,8))
|
[
"shubhamrocks888@gmail.com"
] |
shubhamrocks888@gmail.com
|
7be4818b6c0e6441e0cbd480a64d904766a69b8c
|
525a9f62e61c08e73e8dc9a862ac16f1a38c90ee
|
/guillotina_cms/content/document.py
|
d292d22d28c9bd5c118bd5020c43434a0ee22d2d
|
[
"BSD-2-Clause"
] |
permissive
|
plone/guillotina_cms
|
3a665e7bb226239d39226212773109e2aca16d88
|
44f945d263e92195a3a004a9bea3c89cb68db298
|
refs/heads/master
| 2021-06-05T15:14:12.181692
| 2020-11-25T06:42:20
| 2020-11-25T06:42:20
| 96,428,280
| 5
| 2
|
NOASSERTION
| 2019-11-27T08:02:50
| 2017-07-06T12:34:14
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
# -*- encoding: utf-8 -*-
from guillotina import configure
from guillotina.content import Folder
from guillotina.directives import index
from guillotina_cms.interfaces import IDocument
@configure.contenttype(
type_name="Document",
schema=IDocument,
behaviors=[
"guillotina.behaviors.dublincore.IDublinCore",
"guillotina_cms.interfaces.base.ICMSBehavior",
],
allowed_types=["Image", "File"], # dynamically calculated
)
class Document(Folder):
pass
|
[
"ramon.nb@gmail.com"
] |
ramon.nb@gmail.com
|
522d2d0a1f3690c89ebea50346950bc9d5bdae9c
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2313/60730/307045.py
|
03ebc4ac892099f3b7f6ae0b0fd55ca0e4c16566
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
# coding=utf-8
import queue
class TreeNode(object):
def __init__(self, data=None, left=0, right=0):
self.data = data
if left != 0:
self.left = left
else:
self.left = 0
if right != 0:
self.right = right
else:
self.right = 0
# 这一步是在每次调用某个结点时,自动调用.data的方法
def __str__(self):
return str(self.data)
class Solution:
def createTree(self, root):
if root.left != 0:
root.left = num[root.left - 1]
self.createTree(root.left)
if root.right != 0:
root.right = num[root.right - 1]
self.createTree(root.right)
return root
def midTraverse(self, root):
if root == 0:
return
self.midTraverse(root.left)
ans.append(root.data)
self.midTraverse(root.right)
def is_cbt(self, root):
if root is None:
return True
q = [root]
reach_leaf = False
while q:
node = q.pop(0)
l, r = node.left, node.right
if l == 0 and r != 0:
return False
if (l != 0 or r != 0) and reach_leaf:
return False
if l != 0:
q.append(l)
if r != 0:
q.append(r)
else:
reach_leaf = True
return True
if __name__ == "__main__":
n, root = map(int, input().strip().split())
num = []
for i in range(n * n):
num.append(TreeNode(i + 1, 0, 0))
ans = []
cnt = []
for i in range(n):
try:
a, b, c = map(int, input().strip().split())
num[a - 1] = TreeNode(a, b, c)
if root == a:
root = TreeNode(a, b, c)
n = root
except:
break
solution = Solution()
solution.createTree(root)
solution.midTraverse(root)
if solution.is_cbt(root):
print("true")
else:
print("false")
test = ans
if sorted(test) == ans:
print("true")
else:
print("false")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
9b5869e02799aa26440534bb61ebed80f49efc52
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc025/B/3089839.py
|
33a767ce2060231504f7cf243adfe0c97f64ab32
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
N,A,B,K=map(int,input().split())
P=998244353
def egcd(a, b):
(x, lastx) = (0, 1)
(y, lasty) = (1, 0)
while b != 0:
q = a // b
(a, b) = (b, a % b)
(x, lastx) = (lastx - q * x, x)
(y, lasty) = (lasty - q * y, y)
return (lastx, lasty, a)
def inv(x):
return egcd(x,P)[0]
Fact=[0 for i in range(N+1)]
Finv=[0 for i in range(N+1)]
Fact[0]=1
Finv[0]=1
for i in range(1,N+1):
Fact[i]=(i*Fact[i-1])%P
Finv[i]=(Finv[i-1]*inv(i))%P
ans=0
def NC(k):
tmp=(Finv[k]*Finv[N-k])%P
return (Fact[N]*tmp)%P
for x in range(N+1):
y=(K-x*A)//B
if x*A+y*B==K and 0<=y<=N:
ans+=(NC(x)*NC(y))%P
print(ans%P)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
4b9e9217785da8c7dabeb772d5a56b118350522c
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/eventgrid/azure-mgmt-eventgrid/generated_samples/partner_configurations_unauthorize_partner.py
|
3d4824a9f378093e0e3631555305ab784c298df3
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 1,838
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.eventgrid import EventGridManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-eventgrid
# USAGE
python partner_configurations_unauthorize_partner.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = EventGridManagementClient(
credential=DefaultAzureCredential(),
subscription_id="5b4b650e-28b9-4790-b3ab-ddbd88d727c4",
)
response = client.partner_configurations.unauthorize_partner(
resource_group_name="examplerg",
partner_info={
"authorizationExpirationTimeInUtc": "2022-01-28T01:20:55.142Z",
"partnerName": "Contoso.Finance",
"partnerRegistrationImmutableId": "941892bc-f5d0-4d1c-8fb5-477570fc2b71",
},
)
print(response)
# x-ms-original-file: specification/eventgrid/resource-manager/Microsoft.EventGrid/stable/2022-06-15/examples/PartnerConfigurations_UnauthorizePartner.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
44f6ec999e0c559eb303b70a47b8dd21f56f6cb8
|
d057cafdb198af93a969c10829f4f28c59fbcd15
|
/reborn_web/free/migrations/0002_free_images.py
|
ddf679d2579ed6b560ebab34463b808d2ca30c1a
|
[] |
no_license
|
PresentJay/Re-Born-Web
|
b6b979b965fdc2e442d4e55decc1d6917f8ef0aa
|
36521e59829c3b01cc749e20241db277ce027fcd
|
refs/heads/master
| 2021-01-01T13:43:49.803851
| 2020-02-08T15:05:17
| 2020-02-08T15:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# Generated by Django 3.0.2 on 2020-01-29 10:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('free', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='free',
name='images',
field=models.ImageField(blank=True, null=True, upload_to='summernote/%Y/%m/%d', verbose_name='이미지'),
),
]
|
[
"park19996@naver.com"
] |
park19996@naver.com
|
439126eba273377bc5ea787c30d09045764a9ec6
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/vase.py
|
fc7802475ede80af2fe9834e372e63427cc31a19
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220
| 2018-08-11T23:45:31
| 2018-08-11T23:45:31
| 117,135,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('vase', __name__, url_prefix='/vase')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
5a537e03cd977e2a4724cfe99883c2d5f3f1c0c4
|
87cac4166f07729f1c94066259996c8b752c1202
|
/aiobbox/tools/startbox.py
|
e18beb378195b37e2a8fc3854c14fcedddfff47a
|
[] |
no_license
|
danielsocials/bbox
|
068238a15880468d214109a23017a19e70fc13ec
|
292e350b1cefbbab987baf8c946d4021abd211ea
|
refs/heads/master
| 2020-03-16T06:25:47.907369
| 2018-05-08T04:42:45
| 2018-05-08T04:42:45
| 132,554,332
| 0
| 0
| null | 2018-05-08T04:36:35
| 2018-05-08T04:36:35
| null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
import os, sys
import logging
import uuid
import json
import asyncio
import argparse
import aiobbox.server as bbox_server
from aiobbox.cluster import get_box, get_cluster
from aiobbox.cluster import get_ticket
from aiobbox.utils import import_module
from aiobbox.handler import BaseHandler
class Handler(BaseHandler):
help = 'start bbox python project'
run_forever = True
def add_arguments(self, parser):
parser.add_argument(
'module',
type=str,
nargs='+',
help='the box service module to load')
parser.add_argument(
'--boxid',
type=str,
default='',
help='box id')
parser.add_argument(
'--ssl',
type=str,
default='',
help='ssl prefix, the files certs/$prefix/$prefix.crt and certs/$prefix/$prefix.key must exist if specified')
parser.add_argument(
'--ttl',
type=float,
default=3600 * 24, # one day
help='time to live')
async def run(self, args):
cfg = get_ticket()
if cfg.language != 'python3':
print('language must be python3', file=sys.stderr)
sys.exit(1)
if not args.boxid:
args.boxid = uuid.uuid4().hex
mod_handlers = []
for modspec in args.module:
mod = import_module(modspec)
if hasattr(mod, 'Handler'):
mod_handlers.append(mod.Handler())
else:
mod_handlers.append(BaseHandler())
# start cluster client
await get_cluster().start()
src, handler = await bbox_server.start_server(args)
for h in mod_handlers:
await h.start(args)
self.handler = handler
self.mod_handlers = mod_handlers
asyncio.ensure_future(self.wait_ttl(args.ttl))
async def wait_ttl(self, ttl):
await asyncio.sleep(ttl)
logging.warn('box ttl expired, stop')
sys.exit(0)
def shutdown(self):
loop = asyncio.get_event_loop()
for h in self.mod_handlers:
h.shutdown()
loop.run_until_complete(get_box().deregister())
#loop.run_until_complete(
# self.handler.finish_connections())
|
[
"superisaac.ke@gmail.com"
] |
superisaac.ke@gmail.com
|
08f74db631409569f43498b9e98548a9ab7a92ac
|
62d6a37e1fb1b224b53e14a1cf151ef0571aa20f
|
/orun/contrib/admin/models/log.py
|
d3899b5d47850b596221aa5c978c9a89307a0514
|
[] |
no_license
|
katrid/orun
|
4fa0f291a1ef43f16bc1857a170fc0b2e5e06739
|
bfc6dae06182124ba75b1f3761d81ba8ca387dea
|
refs/heads/master
| 2023-08-30T03:58:34.570527
| 2023-08-09T04:05:30
| 2023-08-09T04:05:30
| 66,562,767
| 14
| 4
| null | 2023-01-06T22:29:37
| 2016-08-25T14:01:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
from orun.db import models
class LogEntry(models.Model):
"""
Log entries on the internal database logging
"""
user = models.ForeignKey('auth_user', null=False, db_index=True)
action = models.ForeignKey('ui.action') # optional ui action
object_id = models.BigIntegerField()
content_type = models.ForeignKey('content.type', on_delete=models.SET_NULL)
content_object = models.CharField(200)
change_message = models.TextField()
class Meta:
log_changes = False
name = 'ui.admin.log'
class UserMenuCounter(models.Model):
"""
Rank the number of times that an action is accessed by user on Admin UI
"""
user = models.ForeignKey('auth.user', null=False, db_index=True, on_delete=models.DB_CASCADE)
menu = models.ForeignKey('ui.menu', null=False, on_delete=models.DB_CASCADE)
counter = models.PositiveIntegerField(default=0)
class Meta:
log_changes = False
name = 'ui.admin.user.menu.counter'
@classmethod
def hit(cls, user, action):
"""
Log a new entry to user to the action
:param user:
:param action:
:return:
"""
counter = cls.objects.get_or_create(user=user, action=action)
counter.update(counter=counter.counter + 1)
|
[
"alexandre@katrid.com"
] |
alexandre@katrid.com
|
4efb2692ed3bce696cc2881b9ce2700ab1ab1953
|
61747f324eaa757f3365fd7bf5ddd53ea0db47d1
|
/casepro/msgs/migrations/0030_label_tests.py
|
ecf4d3e3d4410ce80bff7c39c61bfae32aa3f217
|
[
"BSD-3-Clause"
] |
permissive
|
BlueRidgeLabs/casepro
|
f8b0eefa8f961dd2fdb5da26a48b619ebc1f8c12
|
8ef509326f3dfa80bb44beae00b60cc6c4ac7a24
|
refs/heads/master
| 2022-01-24T09:01:18.881548
| 2017-12-05T18:46:05
| 2017-12-05T18:49:42
| 113,502,588
| 0
| 0
| null | 2017-12-07T21:57:37
| 2017-12-07T21:57:37
| null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import json
from casepro.rules.models import ContainsTest, Quantifier
from casepro.utils import parse_csv
from django.db import migrations, models
def populate_label_tests(apps, schema_editor):
Label = apps.get_model('msgs', 'Label')
for label in Label.objects.all():
keywords = parse_csv(label.keywords) if label.keywords else []
if keywords:
label.tests = json.dumps([ContainsTest(keywords, Quantifier.ANY).to_json()])
label.save(update_fields=('tests',))
print("Migrated label #%d with keywords %s" % (label.pk, label.keywords))
class Migration(migrations.Migration):
dependencies = [
('msgs', '0029_folder_indexes_pt3'),
]
operations = [
migrations.AddField(
model_name='label',
name='tests',
field=models.TextField(blank=True),
),
migrations.RunPython(populate_label_tests)
]
|
[
"rowanseymour@gmail.com"
] |
rowanseymour@gmail.com
|
89f128b97ed2d3eddc33a16f5476131dc083373e
|
b95fa99bb1ba2210b73251614d2613363c37f932
|
/deploy/anne/top.py
|
3f131536072081265b07d93441651fadcbd48744
|
[] |
no_license
|
lingxiao/learn-adj-relation
|
d1a8894fefc776ec0bd414b5f038361ed4b79d16
|
dc4285af19e53d7e2d015eb6394f6c601c707da0
|
refs/heads/master
| 2020-12-30T16:27:51.531268
| 2017-06-07T18:59:48
| 2017-06-07T18:59:48
| 87,714,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,532
|
py
|
############################################################
# Module : get google anne lines that contain words in graph
# split edges and make main-#.py
# Date : April 2nd, 2017
# Author : Xiao Ling
############################################################
import os
import pickle
import shutil
from utils import *
from scripts import *
from app import *
from experiments import *
############################################################
'''
paths
'''
dirs = working_dirs('anne',['scripts','shells', 'results-ppdb-1', 'results-ppdb-ngram-1'])
anne = get_path('anne')
files = [p for p in os.listdir(anne) if '.txt' in p]
############################################################
'''
@Use: rewrite main-#.py file
'''
def run_auto_main(tot, work_dir, script_dir):
print('\n>> running run_auto_main for total: ' + str(tot) )
print('\n>> removing existing scripts...')
shutil.rmtree(script_dir)
os.mkdir(script_dir)
cnt = 0
for k in xrange(tot):
src_path = os.path.join(work_dir, 'anne-0.py')
tgt_path = os.path.join(script_dir, 'anne-' + str(cnt) + '.py')
src_strs = ['batch = 0']
tgt_strs = ['batch = ' + str(cnt)]
auto_gen(src_path, tgt_path, src_strs, tgt_strs)
cnt += 1
'''
@Use: rewrite main-#.sh file
'''
def run_auto_sh(tot, work_dir, shell_dir):
print('\n>> running run_auto_sh for total: ' + str(tot))
print('\n>> removing existing scripts...')
shutil.rmtree(shell_dir)
os.mkdir(shell_dir)
cnt = 0
for k in xrange(tot):
src_path = os.path.join(work_dir,'anne-0.sh')
tgt_path = os.path.join(shell_dir,'anne-' + str(cnt) + '.sh')
src_strs = ['anne-0']
tgt_strs = ['anne-' + str(cnt)]
auto_gen(src_path, tgt_path, src_strs, tgt_strs)
cnt +=1
############################################################
'''
generate py and shell scripts
'''
if False:
num_jobs = len(files)
print('\n\t>> found ' + str(num_jobs) + ' jobs')
run_auto_main( num_jobs
, dirs['root']
, dirs['scripts'])
run_auto_sh ( num_jobs
, dirs['root']
, dirs['shells'])
############################################################
'''
combine results into one file
'''
if True:
data_sets = ['ppdb-1', 'ppdb-ngram-1']
for data_set in data_sets:
print('\n\t>> saving results algo only for ' + data_set)
results_dir = dirs['results-' + data_set ]
results = read_results(results_dir)
out_path = os.path.join(dirs['root'], 'anne-all-' + data_set + '.txt')
save_results(results, out_path, algo_only = True)
|
[
"lingxiao@seas.upenn.edu"
] |
lingxiao@seas.upenn.edu
|
850b4c367e89f11ec61b6ca59130366be349187c
|
8f12530968425e36a41e3495aefedece69278364
|
/第八章appium使用爬取app/爬取美团店铺名称.py
|
2d1c69712a879ebf49cbe9bd5597347e7da9f0be
|
[] |
no_license
|
q3293183121/hhh
|
126ab6f93dec613b0d0bf69595a09ca3c7a4623d
|
bc72bd6ad69a2bdd1a562466b1d7a8ab29c2aef9
|
refs/heads/master
| 2020-05-21T16:08:53.380063
| 2019-05-11T09:31:45
| 2019-05-11T09:31:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
from appium import webdriver
import time
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1.1'
desired_caps['deviceName'] = '8934c63a'
desired_caps['appPackage'] = 'com.sankuai.meituan.takeoutnew'
desired_caps['appActivity'] = '.ui.page.boot.WelcomeActivity'
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
driver.implicitly_wait(20)
time.sleep(5)
driver.find_element_by_xpath('//*[@resource-id="com.sankuai.meituan.takeoutnew:id/layout_search_box_normal"]').click()
driver.find_element_by_xpath('//*[@resource-id="com.sankuai.meituan.takeoutnew:id/search_action_bar_container"]').send_keys('hanbao')
driver.find_element_by_xpath('//*[@resource-id="com.sankuai.meituan.takeoutnew:id/search_tv"]').click()
data = driver.find_elements_by_xpath('//*[@resource-id="com.sankuai.meituan.takeoutnew:id/textview_poi_name"]')
for i in data:
print(i.text)
|
[
"you@example.com"
] |
you@example.com
|
3b981a9fba01cd273902606da0f933d6922cccc7
|
fea44d5ca4e6c9b2c7950234718a4531d453849e
|
/sktime/forecasting/online_learning/tests/test_online_learning.py
|
0d69e830d91f097dbf4c44b2f9e82f9db2795a82
|
[
"BSD-3-Clause"
] |
permissive
|
mlgig/sktime
|
288069ab8c9b0743113877032dfca8cf1c2db3fb
|
19618df351a27b77e3979efc191e53987dbd99ae
|
refs/heads/master
| 2023-03-07T20:22:48.553615
| 2023-02-19T18:09:12
| 2023-02-19T18:09:12
| 234,604,691
| 1
| 0
|
BSD-3-Clause
| 2020-01-17T17:50:12
| 2020-01-17T17:50:11
| null |
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Test OnlineEnsembleForecaster."""
__author__ = ["magittan"]
import numpy as np
import pytest
from sklearn.metrics import mean_squared_error
from sktime.datasets import load_airline
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.model_selection import (
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.online_learning._online_ensemble import OnlineEnsembleForecaster
from sktime.forecasting.online_learning._prediction_weighted_ensembler import (
NNLSEnsemble,
NormalHedgeEnsemble,
)
from sktime.utils.validation._dependencies import _check_soft_dependencies
cv = SlidingWindowSplitter(start_with_window=True, window_length=1, fh=1)
@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
reason="skip test if required soft dependency for hmmlearn not available",
)
def test_weights_for_airline_averaging():
"""Test weights."""
y = load_airline()
y_train, y_test = temporal_train_test_split(y)
forecaster = OnlineEnsembleForecaster(
[
("ses", ExponentialSmoothing(seasonal="multiplicative", sp=12)),
(
"holt",
ExponentialSmoothing(
trend="add", damped_trend=False, seasonal="multiplicative", sp=12
),
),
(
"damped_trend",
ExponentialSmoothing(
trend="add", damped_trend=True, seasonal="multiplicative", sp=12
),
),
]
)
forecaster.fit(y_train)
expected = np.array([1 / 3, 1 / 3, 1 / 3])
np.testing.assert_allclose(forecaster.weights, expected, rtol=1e-8)
def test_weights_for_airline_normal_hedge():
"""Test weights."""
y = load_airline()
y_train, y_test = temporal_train_test_split(y)
hedge_expert = NormalHedgeEnsemble(n_estimators=3, loss_func=mean_squared_error)
forecaster = OnlineEnsembleForecaster(
[
("av5", NaiveForecaster(strategy="mean", window_length=5)),
("av10", NaiveForecaster(strategy="mean", window_length=10)),
("av20", NaiveForecaster(strategy="mean", window_length=20)),
],
ensemble_algorithm=hedge_expert,
)
forecaster.fit(y_train)
forecaster.update_predict(y=y_test, cv=cv, reset_forecaster=False)
expected = np.array([0.17077154, 0.48156709, 0.34766137])
np.testing.assert_allclose(forecaster.weights, expected, atol=1e-8)
def test_weights_for_airline_nnls():
    """Check ensemble weights after online updates with an NNLS expert."""
    airline = load_airline()
    train, test = temporal_train_test_split(airline)
    nnls_expert = NNLSEnsemble(n_estimators=3, loss_func=mean_squared_error)
    ensemble = OnlineEnsembleForecaster(
        [
            ("av%d" % wl, NaiveForecaster(strategy="mean", window_length=wl))
            for wl in (5, 10, 20)
        ],
        ensemble_algorithm=nnls_expert,
    )
    ensemble.fit(train)
    ensemble.update_predict(y=test, cv=cv, reset_forecaster=False)
    # Reference weights from a previous known-good run; NNLS weights are not
    # constrained to sum to one.
    expected = np.array([0.04720766, 0, 1.03410876])
    np.testing.assert_allclose(ensemble.weights, expected, atol=1e-8)
|
[
"noreply@github.com"
] |
mlgig.noreply@github.com
|
53900eeda350da17ad2b6331007f483e1777455b
|
c23fe3a934687023b2b93fd4992f0e01ed008a18
|
/ckanext/queue/config.py
|
c2a907abe04817741b17e46e14450915790cf767
|
[] |
no_license
|
okfn/ckanext-queue
|
eb928cfee6d04a69c53ba544c2f0c2c0eddafe5c
|
c8c55f8c92bce04f060dd573fac2ff10ecd35b44
|
refs/heads/master
| 2016-09-05T11:49:13.147030
| 2011-03-07T14:52:25
| 2011-03-07T14:52:25
| 2,897,696
| 0
| 0
| null | 2014-09-17T14:51:23
| 2011-12-02T10:15:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,359
|
py
|
import ConfigParser
import os
import logging
import optparse
from urlparse import urlparse
from paste.script.util.logging_config import fileConfig
# Config-file section the worker reads when none is given on the CLI.
DEFAULT_SECTION = 'worker'
class attrDict(dict):
    """Dict whose attribute assignments are stored as mapping entries."""
    def __setattr__(self, item, value):
        dict.__setitem__(self, item, value)
def make_optparse(parser=None):
    """Attach the worker's command line options to *parser*.

    A fresh ``optparse.OptionParser`` is created when none is supplied;
    the (possibly new) parser is returned.
    """
    if parser is None:
        parser = optparse.OptionParser()
    add = parser.add_option
    add('-c', '--config', dest='config_file', help='worker config file')
    add('-s', '--section', dest='section', default=DEFAULT_SECTION,
        help='relevant section in config file')
    add('-q', '--queue', dest='queue', help='queue name', default='ckan_worker')
    add('-r', '--routing-key', dest='routing_key', help='queue routing key')
    add('-a', '--amqp-url', dest='amqp', help='URL for the amqp host')
    add('-i', '--site-id', dest='ckan.site_id', help='CKAN site ID')
    add('-u', '--site-url', dest='ckan.site_url', help='CKAN site URL')
    add('-k', '--api-key', dest='ckan.api_key', help='CKAN api key')
    return parser
def read_config(section, config_file=None):
    """Read worker settings from *config_file* plus ``~/.ckanworker.cfg``.

    Returns a dict of the DEFAULT section merged with the named *section*;
    a missing section is tolerated and yields only the defaults.
    """
    config = ConfigParser.ConfigParser()
    if config_file is None:
        logging.warn("No config file specified, using worker.cfg")
        config_file = 'worker.cfg'
    # The user-level file is read last, so its keys override the worker file.
    configs = [config_file, os.path.expanduser('~/.ckanworker.cfg')]
    config.read(configs)
    # Best effort: apply the first file that contains a valid logging
    # configuration; files without one raise and are skipped on purpose.
    for c in configs:
        try:
            fileConfig(c)
            break
        except: pass
    data = config.defaults()
    try:
        data.update(dict(config.items(section)))
    except ConfigParser.NoSectionError:
        # Section absent: fall back to just the [DEFAULT] values.
        pass
    return data
def run_parser(parser):
    """Parse CLI options, merge them over the config-file values and return
    ``(config, args)``.

    When an ``--amqp-url`` is supplied, it is split into the individual
    ``amqp_*`` connection settings.
    """
    (options, args) = parser.parse_args(values=attrDict())
    if 'amqp' in options:
        url = urlparse(options.get('amqp'))
        options['amqp_port'] = url.port
        # NOTE(review): url.netloc includes any user:pass@ prefix and port;
        # url.hostname may be what is actually intended -- confirm downstream.
        options['amqp_hostname'] = url.netloc
        options['amqp_user_id'] = url.username
        # BUG FIX: the password used to be written into 'amqp_hostname'
        # (overwriting the hostname); store it under its own key.
        options['amqp_password'] = url.password
        options['amqp_virtual_host'] = url.path.strip("/")
    config = read_config(options.get('section', DEFAULT_SECTION),
                         options.get('config_file'))
    # Command line options take precedence over file-based configuration.
    config.update(options)
    return config, args
def configure():
    """Convenience entry point: parse options using the default parser."""
    parser = make_optparse()
    return run_parser(parser)
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
ad356df0eb0e885f01ed9f78a0b2c4534c95b4f3
|
c2a7c8c85bfd48be44ee1d1e42871c1be6e28566
|
/lib/saq/persistence/__init__.py
|
7a353deebf86e4f9b4b3a149557016a3d9f753f4
|
[
"Apache-2.0"
] |
permissive
|
iaji/ACE-1
|
3f2b1f51ee03d85883fb804e7c48a545f2bc1e89
|
8c1d96179d252cbbea16384097522d9db4f2aa83
|
refs/heads/master
| 2022-07-19T19:33:08.493579
| 2020-03-15T20:09:47
| 2020-03-15T20:09:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,414
|
py
|
# vim: sw=4:ts=4:et
#
# Persistence
# Functionality to store data in long term storage external to the system.
#
import functools
import logging
import pickle
import saq
from saq.database import (
Persistence,
PersistenceSource,
execute_with_retry,
get_db_connection,
retry
)
def persistant_property(*key_args):
    """Utility decorator for Persistable-based objects. Adds any arguments as properties
    that automatically loads and stores the value in the persistence table in the database.
    These arguments are created as permanent persistent properties."""
    # NOTE: "persistant" is a long-standing misspelling of "persistent";
    # renaming it would break existing callers, so it is kept as-is.
    def _decorator(cls):
        @functools.wraps(cls)
        def wrapper(*args, **kwargs):
            # Install one property per requested key on the class, then
            # instantiate it.
            for key in key_args:
                # this _closure function is required since we're using a for loop and a closure
                # see http://www.discoversdk.com/blog/closures-in-python-3
                def _closure(key=key):
                    internal_key = f'_{key}' # internal value
                    internal_key_loaded = f'_{key}_loaded' # boolean set to True after it's loaded
                    def _getter(self):
                        # Lazily pull the value from the persistence table on
                        # first access; on failure fall back to the cached value.
                        try:
                            self.load_persistent_property(key)
                        except Exception as e:
                            logging.error(f"unable to load persistence key {key}: {e}")
                        return getattr(self, internal_key)
                    def _setter(self, value):
                        # Write-through: store in the database, then cache locally.
                        # NOTE(review): retry() is applied to the *result* of
                        # save_persistent_property(...), not to the callable --
                        # likely intended retry(self.save_persistent_property,
                        # key, value); confirm against saq.database.retry.
                        try:
                            retry(self.save_persistent_property(key, value))
                        except Exception as e:
                            logging.error(f"unable to save persistence key {key}: {e}")
                        setattr(self, internal_key, value)
                    setattr(cls, internal_key, None)
                    setattr(cls, internal_key_loaded, False)
                    setattr(cls, key, property(_getter, _setter))
                _closure(key)
            return cls(*args, **kwargs)
        return wrapper
    return _decorator
class Persistable(object):
    """Mixin giving an object durable key/value storage backed by the
    ``persistence`` database table, scoped to a named persistence source."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # PersistenceSource row this object stores its keys under; must be
        # set via register_persistence_source() before any load/save call.
        self.persistence_source = None
        # Cache of key_name -> persistence row id for keys already tracked.
        self.persistence_key_mapping = {}
    def register_persistence_source(self, source_name):
        """Look up (or create) the PersistenceSource row for *source_name*
        under the current company and remember it on this object.

        Returns the PersistenceSource, or None if it could not be created."""
        persistence_source = saq.db.query(PersistenceSource).filter(
            PersistenceSource.company_id == saq.COMPANY_ID,
            PersistenceSource.name == source_name).first()
        if persistence_source is None:
            logging.info(f"registering persistence source {source_name}")
            saq.db.add(PersistenceSource(company_id=saq.COMPANY_ID, name=source_name))
            saq.db.commit()
            # Re-query to pick up the row just inserted.
            persistence_source = saq.db.query(PersistenceSource).filter(
                PersistenceSource.company_id == saq.COMPANY_ID,
                PersistenceSource.name == source_name).first()
        if persistence_source is None:
            logging.critical(f"unable to create persistence source for {source_name}")
            return None
        # Detach from the session so the object survives session expiry.
        saq.db.expunge(persistence_source)
        self.persistence_source = persistence_source
        return self.persistence_source
    def load_persistent_property(self, key_name):
        """Load the pickled value for *key_name* into ``self._<key_name>``.

        No-op after the first successful load. Raises RuntimeError when
        register_persistence_source() has not been called yet."""
        if self.persistence_source is None:
            raise RuntimeError(f"a request to load a persistence key on {self} before register_persistence_source was called")
        internal_key = f'_{key_name}'
        internal_key_loaded = f'_{key_name}_loaded'
        # have we already loaded it?
        if getattr(self, internal_key_loaded):
            return
        persistence = saq.db.query(Persistence).filter(Persistence.source_id == self.persistence_source.id,
                                                       Persistence.uuid == key_name).first()
        # A missing row loads as None.
        key_value = None
        if persistence is not None:
            key_value = pickle.loads(persistence.value)
        setattr(self, internal_key, key_value)
        setattr(self, internal_key_loaded, True)
    def save_persistent_property(self, key_name, key_value=None):
        """Store *key_value* (pickled) under *key_name*, creating the
        persistence row on first use. Returns True. Raises RuntimeError when
        register_persistence_source() has not been called yet."""
        if self.persistence_source is None:
            raise RuntimeError(f"a request to set a persistence key on {self} before register_persistence_source was called")
        # are we already tracking it?
        if key_name in self.persistence_key_mapping:
            # update the value
            saq.db.execute(Persistence.__table__.update().values(value=pickle.dumps(key_value))\
                .where(Persistence.id == self.persistence_key_mapping[key_name]))
            saq.db.commit()
            return True
        # get the tracking information
        persistence = saq.db.query(Persistence).filter(Persistence.source_id == self.persistence_source.id,
                                                       Persistence.uuid == key_name).first()
        if persistence is not None:
            # and then update the value
            self.persistence_key_mapping[key_name] = persistence.id
            saq.db.execute(Persistence.__table__.update().values(value=pickle.dumps(key_value))\
                .where(Persistence.id == self.persistence_key_mapping[key_name]))
            saq.db.commit()
            return True
        # otherwise we're creating a new persistence key
        logging.debug(f"registering persistence key {key_name}")
        persistence = Persistence(source_id=self.persistence_source.id,
                                  permanent=True,
                                  uuid=key_name,
                                  value=pickle.dumps(key_value))
        saq.db.add(persistence)
        saq.db.commit()
        self.persistence_key_mapping[key_name] = persistence.id
        return True
    def save_persistent_key(self, key_name):
        """Creates a new persistent key with no value recorded. The key must not already exist."""
        self.save_persistent_data(key_name)
    def save_persistent_data(self, key_name, key_value=None):
        """Creates a new persistent key with the given value recorded. The key must not already exist."""
        # NOTE(review): despite the docstring, the ON DUPLICATE KEY UPDATE
        # clause tolerates an existing key (only its last_update is touched).
        if key_value is not None:
            key_value = pickle.dumps(key_value)
        with get_db_connection() as db:
            c = db.cursor()
            execute_with_retry(db, c, """
                INSERT INTO persistence (
                    source_id,
                    uuid,
                    value
                ) VALUES ( %s, %s, %s )
                ON DUPLICATE KEY UPDATE last_update = CURRENT_TIMESTAMP""", (self.persistence_source.id, key_name, key_value),
                commit=True)
    def load_persistent_data(self, key_name):
        """Returns the value of the persistent key by name. Raises an exception if the key does not exist."""
        persistence = saq.db.query(Persistence).filter(Persistence.source_id == self.persistence_source.id,
                                                       Persistence.uuid == key_name).one()
        if persistence.value is None:
            return None
        return pickle.loads(persistence.value)
    def persistent_data_exists(self, key_name):
        """Returns True if the given key exists, False otherwise."""
        persistence = saq.db.query(Persistence).filter(Persistence.source_id == self.persistence_source.id,
                                                       Persistence.uuid == key_name).first()
        return persistence is not None
|
[
"unixfreak0037@gmail.com"
] |
unixfreak0037@gmail.com
|
51be6a501ac26b5d773bf8b304e3986f6b5d3afd
|
e2d23d749779ed79472a961d2ab529eeffa0b5b0
|
/pipeline/component_framework/component.py
|
78c9c838bffb9e79b9aee8600ea2886df757a6c7
|
[
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
manlucas/atom
|
9fa026b3f914e53cd2d34aecdae580bda09adda7
|
94963fc6fdfd0568473ee68e9d1631f421265359
|
refs/heads/master
| 2022-09-30T06:19:53.828308
| 2020-01-21T14:08:36
| 2020-01-21T14:08:36
| 235,356,376
| 0
| 0
|
NOASSERTION
| 2022-09-16T18:17:08
| 2020-01-21T14:04:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from pipeline.core.data.base import DataObject
from pipeline.core.data.converter import get_variable
from pipeline.exceptions import ComponentDataLackException
from pipeline.component_framework.base import ComponentMeta
class Component(object):
    """Base class for pipeline components, exposing the schema of the bound
    service and resolving configured inputs into execution data.

    NOTE(review): ``__metaclass__`` is the Python 2 mechanism; under Python 3
    this attribute is ignored and the metaclass would have to be declared as
    ``class Component(object, metaclass=ComponentMeta)``.
    """
    __metaclass__ = ComponentMeta
    def __init__(self, data_dict):
        # Raw per-input configuration, keyed by input name.
        self.data_dict = data_dict
    @classmethod
    def outputs_format(cls):
        """Return the bound service's output items rendered via as_dict().

        NOTE(review): under Python 3 map() is a lazy iterator, not a list.
        """
        outputs = cls.bound_service().outputs()
        outputs = map(lambda oi: oi.as_dict(), outputs)
        return outputs
    @classmethod
    def inputs_format(cls):
        """Return the bound service's input items rendered via as_dict()."""
        inputs = cls.bound_service().inputs()
        inputs = map(lambda ii: ii.as_dict(), inputs)
        return inputs
    @classmethod
    def _get_item_schema(cls, type, key):
        # Find the schema item named *key* among the service's *type*
        # ('inputs' or 'outputs') items; None when absent.
        items = getattr(cls.bound_service(), type)()
        for item in items:
            if item.key == key:
                return item
        return None
    @classmethod
    def get_output_schema(cls, key):
        """Schema of output item *key* (AttributeError when the key is missing)."""
        return cls._get_item_schema(type='outputs', key=key).schema
    @classmethod
    def get_input_schema(cls, key):
        """Schema of input item *key* (AttributeError when the key is missing)."""
        return cls._get_item_schema(type='inputs', key=key).schema
    @classmethod
    def form_is_embedded(cls):
        """Whether the component declares an embedded form (default False)."""
        return getattr(cls, 'embedded_form', False)
    def clean_execute_data(self, context):
        """
        @summary: hook for subclass of Component to clean execute data with context
        @param context:
        @return:
        """
        return self.data_dict
    def data_for_execution(self, context, pipeline_data):
        """Resolve every configured input into a variable and wrap the result
        in a DataObject; raises ComponentDataLackException on a None input."""
        data_dict = self.clean_execute_data(context)
        inputs = {}
        for key, tag_info in data_dict.items():
            if tag_info is None:
                raise ComponentDataLackException('Lack of inputs: %s' % key)
            inputs[key] = get_variable(key, tag_info, context, pipeline_data)
        return DataObject(inputs)
    def service(self):
        """Return the service instance bound to this component class."""
        return self.bound_service()
|
[
"lucaswang@canway.net"
] |
lucaswang@canway.net
|
d2fb7dcbe451a07d724df45a7878f626dacd3386
|
64267b1f7ca193b0fab949089b86bc7a60e5b859
|
/slehome/account/migrations/0097_auto_20150202_0039.py
|
22375ff145f8b8257f860a9b8807a456a1d3db1d
|
[] |
no_license
|
hongdangodori/slehome
|
6a9f2b4526c2783932627b982df0540762570bff
|
3e558c78c3943dadf0ec485738a0cc98dea64353
|
refs/heads/master
| 2021-01-17T12:00:34.221088
| 2015-02-06T13:44:00
| 2015-02-06T13:44:00
| 28,847,585
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: refresh the auth_key default and retype stat_value."""
    dependencies = [
        ('account', '0096_auto_20150201_2051'),
    ]
    operations = [
        migrations.AlterField(
            model_name='basicmemberinformation',
            name='auth_key',
            # NOTE(review): the default is a fixed hash baked in at
            # makemigrations time, not generated per row.
            field=models.CharField(default='3635261e81ece5847f80557667f19c082cb20feb39a3fbaeb31df9b5fe98a104', max_length=64),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='stat',
            name='stat_value',
            field=models.IntegerField(),
            preserve_default=True,
        ),
    ]
|
[
"chungdangogo@gmail.com"
] |
chungdangogo@gmail.com
|
52f2f4d3f8ca4356fc81a84b1d54b9beb4ba9fbb
|
698069070a63a49e0b29852af71228f88b2596d7
|
/digitalocean.py
|
d33fdee76940229bf4ec6380ccb30de07b1aca50
|
[] |
no_license
|
DevOpsHW/DevOps-HW1
|
7f3682c00a49d10ba7a9ee2bee8a547592e25f58
|
2ea59b136b3bdd6e0d94f124e3a5fb76e057209c
|
refs/heads/master
| 2020-04-14T13:19:24.587756
| 2015-09-24T00:26:52
| 2015-09-24T00:26:52
| 42,484,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,345
|
py
|
import requests
import json
import os
import time
import sys
class Droplet():
    """Lightweight view of a DigitalOcean droplet (id, name, ip, status).

    Python 2 code (print statements)."""
    def __init__(self, *args, **kwargs):
        # Built either from a droplet dict returned by the API (**kwargs)
        # or from positional (id, name) with ip/status unknown.
        if kwargs:
            self.id = kwargs['id']
            self.name = kwargs['name']
            # assumes at least one IPv4 entry exists -- TODO confirm for
            # droplets created without networking.
            self.ip = kwargs['networks']['v4'][0]['ip_address']
            self.status = kwargs['status']
        else:
            self.id = args[0]
            self.name = args[1]
            self.ip = None
            self.status = None
    def updateStatus(self, conn):
        """Refresh status (and ip, once active) from the API via *conn*."""
        r = conn.retrieveDroplet(self.id)
        self.status = r['droplet']['status']
        if self.status == 'active':
            self.ip = r['droplet']['networks']['v4'][0]['ip_address']
    def checkStatus(self, conn):
        """Fetch, print and return the droplet's current status."""
        self.status = conn.retrieveDroplet(self.id)['droplet']['status']
        print "Droplet status is " + self.status
        return self.status
    def __eq__(self, other):
        # Droplets compare equal by id only.
        return self.id == other.id
    def __str__(self):
        return "ID: %d, Name: %s, IP: %s, Status: %s" %(self.id, self.name, self.ip, self.status)
class Digitalocean():
    """Thin Python 2 wrapper over the DigitalOcean v2 REST API."""
    def __init__(self, token):
        # Every request authenticates with this bearer token.
        self.headers = {
            'Authorization': 'Bearer ' + token,
            "Content-Type": 'application/json'
        }
        # Droplets created through this wrapper and their IPs.
        self.droplets = []
        self.ips = []
    def listRegions(self):
        """Print the slug of every available region."""
        r = requests.get("https://api.digitalocean.com/v2/regions", headers=self.headers)
        for region in r.json()['regions']:
            print region['slug']
    def listImages(self):
        """Print the slug of every available image."""
        r = requests.get("https://api.digitalocean.com/v2/images", headers=self.headers)
        for image in r.json()['images']:
            print image['slug']
    def createDroplet(self, name, region, imageName):
        """Create a 512mb droplet, poll until it has an IPv4 address, track it
        locally and return the droplet dict from the API."""
        data = {
            "name": name,
            "region": region,
            "size": "512mb",
            # Authorize every SSH key registered on the account.
            "ssh_keys": self.getSSHkeyID(),
            "image": imageName,
            "backups": bool(None),
            "ipv6": bool(False),
            'virtio': False,
            "private_networking": bool(None),
            "user_data": None,
        }
        json_params = json.dumps(data)
        r = requests.post('https://api.digitalocean.com/v2/droplets', data=json_params, headers=self.headers)
        # NOTE(review): the third argument (status) is ignored by Droplet's
        # positional branch, which stores only id and name.
        droplet = Droplet(r.json()['droplet']['id'], name, r.json()['droplet']['status'])
        print "Droplet created, ID: ", droplet.id
        # Poll every 3 seconds until the API reports an IPv4 address.
        while(True):
            dr = self.retrieveDroplet(droplet.id)['droplet']
            if 'v4' in dr['networks'].keys() and len(dr['networks']['v4']) > 0:
                print "Get IP address: %s" % dr['networks']['v4'][0]['ip_address']
                droplet.ip = dr['networks']['v4'][0]['ip_address']
                break
            else:
                print "Waiting for IP address"
                time.sleep(3)
        self.droplets.append(Droplet(**dr))
        self.ips.append(self.droplets[-1].ip)
        return dr
    def getSSHkeyID(self):
        """Return the ids of all SSH keys registered on the account."""
        res = list()
        r = requests.get("https://api.digitalocean.com/v2/account/keys", headers=self.headers)
        for id in r.json()['ssh_keys']:
            res.append(id['id'])
        return res
    def getDropletsList(self):
        """Return (id, name, ip, status) tuples for every droplet."""
        res = []
        r = requests.get("https://api.digitalocean.com/v2/droplets", headers=self.headers)
        for droplet in r.json()['droplets']:
            res.append((droplet['id'], droplet['name'], droplet['networks']['v4'][0]['ip_address'], droplet['status']))
        return res
    def retrieveDroplet(self, dropletID):
        """Return the raw API JSON for a single droplet."""
        r = requests.get("https://api.digitalocean.com/v2/droplets/" + str(dropletID), headers=self.headers)
        return r.json()
    def deleteDroplet(self, dropletID):
        """Destroy a droplet and print the raw response body."""
        r = requests.delete("https://api.digitalocean.com/v2/droplets/" + str(dropletID), params=None, headers=self.headers)
        print r.content
    def destorySSHKey(self, key_id):
        """Delete an SSH key by id (method name keeps its historical typo)."""
        r = requests.delete("https://api.digitalocean.com/v2/account/keys/" + str(key_id), headers=self.headers)
        print r.content
    def createSSHKey(self, name, public_key_path):
        """Upload a public key file and return the new key's id."""
        f = open(public_key_path, 'r')
        key = f.read()
        f.close()
        data = {
            "name": name,
            "public_key": key
        }
        json_params = json.dumps(data)
        r = requests.post("https://api.digitalocean.com/v2/account/keys", data=json_params, headers=self.headers)
        print r.content
        return r.json()['ssh_key']['id']
    def createInventory(self, key_file):
        """Append any droplet missing from the 'inventory' file as an Ansible
        host line using *key_file* as the private key."""
        key_file = os.path.abspath(key_file)
        f = open('inventory', 'r')
        text = f.read()
        f.close()
        # Re-opened in append-binary mode; new lines go at the end.
        f = open('inventory', 'ab')
        print "Writing to inventory."
        for droplet in [(x[0], x[2]) for x in self.getDropletsList()]:
            # Skip droplets whose IP already appears anywhere in the file.
            if droplet[1] not in text:
                s = '%d ansible_ssh_host=%s ansible_ssh_user=root ansible_ssh_private_key_file=%s' % (droplet[0], droplet[1], key_file,)
                print s
                print >> f, s
        f.close()
    def checkIfAllActive(self):
        """True when every droplet on the account reports status 'active'."""
        if all( [droplet[3] == 'active' for droplet in self.getDropletsList()]):
            return True
        else:
            return False
# token = os.environ["DO_TOKEN"]
# conn = Digitalocean(token)
# for droplet in conn.getDropletsList():
# print droplet
# conn.deleteDroplet(droplet[0])
|
[
"kgong@ncsu.edu"
] |
kgong@ncsu.edu
|
c2967c440e90bbd9acfffcffd7a7062990563747
|
4469139b6bb093e2cb6cfca85b74d70836fb8056
|
/python-notebook/tests/test_notebook.py
|
29aab0dc3cc875e3e312cc854661c5ea641a25ed
|
[
"MIT"
] |
permissive
|
ucphhpc/nbi-jupyter-docker-stacks
|
ef30abfa6fcaf606bb75f35ff18526cf1abb416a
|
f1b5c83efb2df0f2e07c7c7d0199b650a3b33750
|
refs/heads/master
| 2023-08-31T09:31:18.096582
| 2023-08-25T11:36:12
| 2023-08-25T11:36:12
| 148,879,765
| 6
| 2
|
MIT
| 2023-03-27T07:08:42
| 2018-09-15T07:25:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
import os
import subprocess
import tempfile
import nbformat
# Notebook directory resolved against the current working directory (the
# test runner is expected to start at the repository root).
cur_path = os.path.abspath(".")
notebooks_path = os.path.join(cur_path, "notebooks")
# Kernels each notebook is executed under.
kernels = ["python3"]
def _notebook_run(path, kernel="python3", timeout=300):
    """Execute a notebook via nbconvert and collect output.
    :returns (parsed nb object, execution errors)
    """
    dirname, __ = os.path.split(path)
    # NOTE(review): permanently changes the process CWD so relative paths
    # inside the notebook resolve next to it.
    os.chdir(dirname)
    # NOTE(review): handing the still-open NamedTemporaryFile's name to a
    # subprocess works on POSIX but not on Windows.
    with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
        args = [
            "jupyter",
            "nbconvert",
            "--to",
            "notebook",
            "--execute",
            "--ExecutePreprocessor.timeout={}".format(timeout),
            "--ExecutePreprocessor.kernel_name={}".format(kernel),
            "--output",
            fout.name,
            path,
        ]
        subprocess.check_call(args)
        # nbconvert wrote into the temp file; rewind and parse it.
        fout.seek(0)
        nb = nbformat.read(fout, nbformat.current_nbformat)
        # Collect every error output from every cell.
        errors = [
            output
            for cell in nb.cells
            if "outputs" in cell
            for output in cell["outputs"]
            if output.output_type == "error"
        ]
    return nb, errors
def test_notebooks():
    """Run every notebook under each configured kernel; fail on any error output."""
    for notebook_name in os.listdir(notebooks_path):
        notebook_path = os.path.join(notebooks_path, notebook_name)
        for kernel in kernels:
            _, errors = _notebook_run(notebook_path, kernel=kernel)
            assert errors == []
|
[
"munk1@live.dk"
] |
munk1@live.dk
|
9ad6372f1990c0bf87835f2cc9cc0edb20415089
|
0c3757dfd4e0a4b8201bc4d2b040029fd7a62e9c
|
/listEx.py
|
b0b0863b44e2139cbd395b2d45d3edbb88c3ea36
|
[] |
no_license
|
RubelAhmed57/Python-Bangla
|
41074164c7f4a076747a339589f18dc73137414c
|
cdde682dd97e8081afbd406b00dad7aa5cd038c4
|
refs/heads/master
| 2021-06-26T15:48:34.674659
| 2017-09-09T15:35:56
| 2017-09-09T15:35:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# Demonstrates basic list operations: sorting, reversing, adding, removing.
item = [1, 2, 4, 3, 5, 6, 7, 8]
print(type(item))
item2 = ["A", "C", "B"]
print(item2)
print(" sort function")
# sorted() already returns a new list.
resize = sorted(item)
print(resize)
print("Reverse function")
# A negative-step slice yields a reversed copy.
re = resize[::-1]
print(re)
print(" adding system ")
resize.append(9)
print(resize)
print("Remove system ")
resize.remove(1)
print(resize)
print("...................")
# Curly braces with bare elements build a set.
a = {1, 3, 4, 5}
print(type(a))
|
[
"vubon.roy@gmail.com"
] |
vubon.roy@gmail.com
|
9aa74c13724d80155c54c651c7793d6ebbb844c1
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/exercises/1901010167/d09/main.py
|
6212354875adecd6ba0d9a597876c1306511eabe
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008
| 2022-03-17T07:56:30
| 2022-03-17T07:56:30
| 201,287,222
| 9
| 6
| null | 2019-08-08T15:34:26
| 2019-08-08T15:34:25
| null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# Run the word-frequency statistics over the bundled Tang-poems JSON file.
from mymodule import stats_word
if __name__ == "__main__":
    try:
        # Arguments: file path, open mode, top-N count, offset.
        # NOTE(review): stats_file's signature inferred from this call site;
        # confirm against mymodule.stats_word.
        stats_word.stats_file('D:/Documents/GitHub/selfteaching-python-camp/exercises/1901010167/d09/tang300.json','r',100,0)
    except ValueError :
        # Message (Chinese): "the input is a non-string".
        print('输入的为非字符串')
|
[
"2470962+srvz@users.noreply.github.com"
] |
2470962+srvz@users.noreply.github.com
|
1b10619c4db7046649eca80fc0c3e8be0f29b2ff
|
a7d6807af3b8b67d0ac93b014d9a688d8d612539
|
/graphs/create_adj_list.py
|
46891842757d6888c5e7a1a1eb60009541c089d0
|
[] |
no_license
|
shehryarbajwa/Algorithms--Datastructures
|
06a9f86bedbf40909011365cf169d5c77791b643
|
cbd456e0db9ea6a542926015aeee4bd2ceff9ff9
|
refs/heads/master
| 2023-04-11T21:00:17.438588
| 2021-05-11T23:19:05
| 2021-05-11T23:19:05
| 193,496,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
def create_adj_list(edges, amount_of_vertices):
    """Build an undirected adjacency list for vertices 0..amount_of_vertices-1.

    Each edge (u, v) is recorded in both endpoints' neighbor lists, in input
    order.
    """
    adjacency = [[] for _ in range(amount_of_vertices)]
    for src, dst in edges:
        adjacency[src].append(dst)
        adjacency[dst].append(src)
    return adjacency
def create_adj_matrix(edges, amount_of_vertices):
    """Build a symmetric adjacency matrix: True marks an edge, None no edge."""
    n = amount_of_vertices
    matrix = [[None] * n for _ in range(n)]
    for src, dst in edges:
        matrix[src][dst] = matrix[dst][src] = True
    return matrix
def num_edges(graph):
    """Return the number of undirected edges in adjacency-list *graph*.

    BUG FIX: each undirected edge (u, v) appears once in graph[u] and once
    in graph[v], so the total neighbor count is twice the edge count; the
    old code returned that doubled sum. Halving also makes the function
    correct for a raw edge list of (u, v) pairs, since each pair has
    length 2.
    """
    return sum(len(neighbors) for neighbors in graph) // 2
# Quick manual smoke checks for the helpers above.
print(create_adj_list([(0,1),(1,2),(0,2)],3))
print(create_adj_matrix([(0,1),(1,2),(0,2)],3))
# NOTE(review): this passes an edge list, not an adjacency list, to num_edges.
print(num_edges([(0,1),(1,2),(0,2)]))
|
[
"shehryar.bajwa@gmail.com"
] |
shehryar.bajwa@gmail.com
|
a0c5fee17f2a900bdabde9b78064baad397e371c
|
e6f45fba3f8c6b6fba07f51e6cf3ae848e711551
|
/center_three/voice_label/voice_quality/test_regex.py
|
a9a58680a6e3da39099c5c611f31ae81b35feda5
|
[] |
no_license
|
yangwen1997/ASR
|
c0a49de2bcde7fe0a8806d4b7c1d2f2faeda0c70
|
8d8691f74646a1f66573ecbeaf022911f8363b55
|
refs/heads/master
| 2022-12-07T14:36:20.842942
| 2020-09-08T02:34:31
| 2020-09-08T02:34:31
| 293,677,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,503
|
py
|
# !/usr/bin/env Python3
# -*- coding: utf-8 -*-
# @Author : wangchen
# @FILE : run.py
# @Time : 2020/7/13 20:55
# @Software : PyCharm
import os
import sys
sys.path.append(os.environ['PUSHPATH'])
from center_three.voice_label.voice_quality.data_processing_addfunc import Data_Processing
from center_three.voice_label.utils import Insert_Into_Mysql, table_data, Read_From_Mysql
from center_three.voice_label.config import *
import pymysql
import time
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime
import numpy as np
import pandas as pd
def pids():
    """Sample roughly 1/20 of the distinct pids seen in either the August
    2020 label-result table or the ASR call list; returned as a set."""
    read = Read_From_Mysql()
    pids_df_1 = read.select_from_table(insert_database, "select distinct pid from q_yt_lable_result_202008")
    pids_df_2 = read.select_from_table(call_createtime_database, "select distinct pid from asr_list_202008")
    # Union of both sources, de-duplicated and re-indexed.
    pids_df = pd.concat([pids_df_1, pids_df_2], axis=0)
    pids_df.columns = ["pids"]
    pids_df.drop_duplicates(inplace=True)
    pids_df.reset_index(drop=True, inplace=True)
    import random
    # Draw a random ~5% sample of the row indices.
    pid_index = random.sample(set(pids_df.index), int(np.round(len(pids_df) / 20)))
    pids = pids_df.iloc[pid_index, :]
    pids = set(pids.pids)
    # Release the DB reader eagerly.
    del read
    return pids
def test_main(pid, timestamp, type_code):
    """Extract voice labels for one *pid* and persist the results.

    Matches label regexes against the pid's call transcripts (the full
    business<->customer dialogue and the customer-only text) and writes
    matched / unmatched rows into month-partitioned MySQL tables, creating
    each table on first use.
    """
    # NOTE(review): hard-coded DB credentials; these belong in config/env.
    insert_func = Insert_Into_Mysql(host='106.12.108.97',
                                    user ='root',
                                    password ="80390cb1a66549e1",
                                    port =3306)
    data_process = Data_Processing()
    data_process_debug = Data_Processing()
    data_process_debug.read = Read_From_Mysql(host='106.12.108.97',
                                              user ='root',
                                              password ="80390cb1a66549e1",
                                              port =3306)
    start = time.time()
    try:
        print("======================================我要开始提取语音标签了,pid:%s===================================="%pid)
        text_data = data_process.get_text_data(pid, timestamp)
        regex_df = data_process_debug.get_rex_df(type_code)  # regexes for the business<->customer dialogue
        customer_regex_df = data_process_debug.get_customer_regex_df(type_code)  # df of regexes for extracting customer-text labels
        if len(regex_df) == 0:
            print("label库里没有数据")
        elif len(text_data) == 0:
            print("没有通话数据")
        elif len(customer_regex_df) == 0:
            print("客户文本正则没有数据")
        else:
            # TODO ================== extract labels from the business<->customer dialogue ==================
            print("====================我要根据商务客户对话提取标签================")
            match_all_df, match_key_df, no_match_df = data_process.match_regex(text_data, regex_df)
            # Month-partitioned destination tables derived from *timestamp*.
            res_table_name = Res_Table.format(table_data(timestamp))
            unmatch_table_name = Unmatch_Table.format(table_data(timestamp))
            if len(match_all_df) != 0:
                insert_func.flush_hosts(Res_Data_Base)
                # Insert; when the table is missing (ProgrammingError),
                # create it and retry once.
                try:
                    insert_func.insert_data_multi(match_all_df, Res_Data_Base, res_table_name)
                    print("存入匹配数据")
                except pymysql.ProgrammingError:
                    insert_func.create_table(Res_Data_Base, res_table_name)
                    insert_func.insert_data_multi(match_all_df, Res_Data_Base, res_table_name)
                    print("建表{}存入匹配数据".format(res_table_name), data_process.table_data(time.time()))
            if len(match_key_df) != 0:
                insert_func.flush_hosts(Res_Data_Base)
                try:
                    insert_func.insert_data_multi(match_key_df, Res_Data_Base,unmatch_table_name)
                    print("存入未匹value配数据")
                except pymysql.ProgrammingError:
                    insert_func.create_table(Res_Data_Base, unmatch_table_name)
                    insert_func.insert_data_multi(match_key_df, Res_Data_Base, unmatch_table_name)
                    print("建表{}存入未匹配数据".format(unmatch_table_name), data_process.table_data(time.time()))
            if len(no_match_df) != 0:
                insert_func.flush_hosts(Res_Data_Base)
                try:
                    insert_func.insert_data_multi(no_match_df, Res_Data_Base, unmatch_table_name)
                    print("存入未匹配数据")
                except pymysql.ProgrammingError:
                    insert_func.create_table(Res_Data_Base, unmatch_table_name)
                    insert_func.insert_data_multi(no_match_df, Res_Data_Base, unmatch_table_name)
                    print("建表{}存入匹配数据".format(unmatch_table_name), data_process.table_data(time.time()))
            print("====================我要根据客户文本提取标签================")
            # customer_regex_df = data_process.get_customer_regex_df(type_code)  # superseded by the debug reader above
            customer_match_all_df, customer_no_match_df, customer_result = data_process_debug.match_customer_label(text_data, customer_regex_df, pid, timestamp)
            customer_res_table_name = Customer_Res_Table.format(table_data(timestamp))
            # (debug) print the matched-result table name here if needed
            customer_unmatch_table_name = Customer_Unmatch_Table.format(table_data(timestamp))
            # (debug) print the unmatched-result table name here if needed
            if len(customer_match_all_df) != 0:
                insert_func.flush_hosts(Res_Data_Base)
                try:
                    insert_func.insert_data_multi(customer_match_all_df, Res_Data_Base, customer_res_table_name)
                    print("存入匹配数据")
                except pymysql.ProgrammingError:
                    print("没有匹配成功结果表,需要自己建表")
                    insert_func.create_customer_label_table(Res_Data_Base, customer_res_table_name)
                    insert_func.insert_data_multi(customer_match_all_df, Res_Data_Base, customer_res_table_name)
                    print("建表{}存入匹配数据".format(customer_res_table_name), data_process.table_data(time.time()))
            if len(customer_no_match_df) != 0:
                insert_func.flush_hosts(Res_Data_Base)
                try:
                    insert_func.insert_data_multi(customer_no_match_df, Res_Data_Base, customer_unmatch_table_name)
                    print("存入匹配数据")
                except pymysql.ProgrammingError:
                    print("没有匹配未成功结果表,需要自己建表")
                    insert_func.create_customer_label_table(Res_Data_Base, customer_unmatch_table_name)
                    insert_func.insert_data_multi(customer_no_match_df, Res_Data_Base, customer_unmatch_table_name)
                    print("建表{}存入匹配数据".format(customer_unmatch_table_name), data_process.table_data(time.time()))
    except Exception as e:
        print(e)
    finally:
        print("耗时", time.time()-start)
        # Drop DB helpers so connections are released promptly.
        del insert_func
        del data_process
        del data_process_debug
def now_data(timestamp):
"""
:param timestamp: 时间戳
:return: 年月 (格式)->2020-07-01 10:10:10
"""
dt = datetime.fromtimestamp(int(timestamp))
date = dt.strftime('%Y-%m-%d %H:%M:%S').strip()
return date
def run():
    """Run the label extraction over a fresh pid sample, sharing one timestamp."""
    sampled_pids = pids()
    started_at = time.time()
    type_code = ""
    for sampled_pid in sampled_pids:
        test_main(sampled_pid, started_at, type_code)
# Script entry point.
if __name__ == '__main__':
    run()
|
[
"1120021365@qq.com"
] |
1120021365@qq.com
|
faf34fd6b3c2704e4a6a1710e4d5c16d9fafcb1b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_322/ch73_2019_04_06_21_23_14_306424.py
|
a879897561f7b920a0c6251106cdf3d6805deb08
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
def remove_vogais(n):
    """Return *n* with the lowercase vowels a, e, i, o, u removed.

    Uppercase and accented vowels are intentionally kept, matching the
    original exercise behavior.
    """
    return "".join(ch for ch in n if ch not in "aeiou")
|
[
"you@example.com"
] |
you@example.com
|
6e1173e136811e0d667fdb4de6e48847d65e0fed
|
d73409535734a788af83a9b2b2e32dd1b979d5d2
|
/proxySTAR_V3/certbot/venv.1509389747.bak/lib/python2.7/site-packages/twine/repository.py
|
7aeebcd2feec22f9e35f8ee11a1c85cd6c2ed485
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
mami-project/lurk
|
adff1fb86cb3e478fe1ded4cbafa6a1e0b93bfdd
|
98c293251e9b1e9c9a4b02789486c5ddaf46ba3c
|
refs/heads/master
| 2022-11-02T07:28:22.708152
| 2019-08-24T19:28:58
| 2019-08-24T19:28:58
| 88,050,138
| 2
| 2
|
NOASSERTION
| 2022-10-22T15:46:11
| 2017-04-12T12:38:33
|
Python
|
UTF-8
|
Python
| false
| false
| 7,083
|
py
|
# Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
from tqdm import tqdm
import requests
from requests import adapters
from requests import codes
from requests.packages.urllib3 import util
from requests_toolbelt.multipart import (
MultipartEncoder, MultipartEncoderMonitor
)
from requests_toolbelt.utils import user_agent
import twine
# Metadata keys whose list values must be sent as a single field rather
# than flattened into one field per item.
KEYWORDS_TO_NOT_FLATTEN = set(["gpg_signature", "content"])
# Known index endpoints.
LEGACY_PYPI = 'https://pypi.python.org/'
WAREHOUSE = 'https://upload.pypi.org/'
OLD_WAREHOUSE = 'https://upload.pypi.io/'
class ProgressBar(tqdm):
    """tqdm bar whose update hook accepts absolute progress values."""
    def update_to(self, n):
        """Update the bar in the way compatible with requests-toolbelt.
        This is identical to tqdm.update, except ``n`` will be the current
        value - not the delta as tqdm expects.
        """
        self.update(n - self.n)
class Repository(object):
    """Twine-side wrapper around a package index's upload API.

    Holds an authenticated ``requests`` session (with retry-enabled
    adapters) and caches per-project release listings fetched from the
    index's JSON API.
    """

    def __init__(self, repository_url, username, password):
        self.url = repository_url

        self.session = requests.session()
        self.session.auth = (username, password)
        self.session.headers['User-Agent'] = self._make_user_agent_string()
        # Retry transient failures on both plain and TLS endpoints.
        for scheme in ('http://', 'https://'):
            self.session.mount(scheme, self._make_adapter_with_retries())

        # Cache of {safe_name: releases dict} from package_is_uploaded().
        self._releases_json_data = {}

    @staticmethod
    def _make_adapter_with_retries():
        # Only GETs are retried: uploads are not idempotent.
        retry = util.Retry(
            connect=5,
            total=10,
            method_whitelist=['GET'],
            status_forcelist=[500, 501, 502, 503],
        )
        return adapters.HTTPAdapter(max_retries=retry)

    @staticmethod
    def _make_user_agent_string():
        # Imported lazily to avoid a circular import at module load time.
        from twine import cli
        dependencies = cli.list_dependencies_and_versions()
        return user_agent.UserAgentBuilder(
            'twine', twine.__version__,
        ).include_extras(
            dependencies
        ).include_implementation().build()

    def close(self):
        """Release the underlying HTTP session."""
        self.session.close()

    @staticmethod
    def _convert_data_to_list_of_tuples(data):
        """Flatten list/tuple metadata values into repeated (key, item) pairs.

        Keys in ``KEYWORDS_TO_NOT_FLATTEN`` keep their value intact even
        when it is a tuple (e.g. the multipart "content" triple).
        """
        data_to_send = []
        for key, value in data.items():
            if (key in KEYWORDS_TO_NOT_FLATTEN or
                    not isinstance(value, (list, tuple))):
                data_to_send.append((key, value))
            else:
                for item in value:
                    data_to_send.append((key, item))
        return data_to_send

    def set_certificate_authority(self, cacert):
        if cacert:
            self.session.verify = cacert

    def set_client_certificate(self, clientcert):
        if clientcert:
            self.session.cert = clientcert

    def register(self, package):
        """POST a "submit" (register) request for ``package``.

        Returns the (already closed) response object.
        """
        data = package.metadata_dictionary()
        data.update({
            ":action": "submit",
            "protocol_version": "1",
        })

        print("Registering {0}".format(package.basefilename))

        data_to_send = self._convert_data_to_list_of_tuples(data)
        encoder = MultipartEncoder(data_to_send)
        resp = self.session.post(
            self.url,
            data=encoder,
            allow_redirects=False,
            headers={'Content-Type': encoder.content_type},
        )
        # Bug 28. Try to silence a ResourceWarning by releasing the socket.
        resp.close()
        return resp

    def _upload(self, package):
        """Perform a single "file_upload" POST for ``package``."""
        data = package.metadata_dictionary()
        data.update({
            # action
            ":action": "file_upload",
            # Fixed: this field was misspelled "protcol_version", which was
            # inconsistent with register() above.
            "protocol_version": "1",
        })

        data_to_send = self._convert_data_to_list_of_tuples(data)

        print("Uploading {0}".format(package.basefilename))

        with open(package.filename, "rb") as fp:
            data_to_send.append((
                "content",
                (package.basefilename, fp, "application/octet-stream"),
            ))
            encoder = MultipartEncoder(data_to_send)
            with ProgressBar(total=encoder.len, unit='bytes',
                             unit_scale=True, leave=False) as bar:
                # Mirror the monitor's byte count into the progress bar.
                monitor = MultipartEncoderMonitor(
                    encoder, lambda monitor: bar.update_to(monitor.bytes_read)
                )

                resp = self.session.post(
                    self.url,
                    data=monitor,
                    allow_redirects=False,
                    headers={'Content-Type': monitor.content_type},
                )

        return resp

    def upload(self, package, max_redirects=5):
        """Upload ``package``, retrying up to ``max_redirects`` times on 5xx.

        Returns the last response received.
        """
        number_of_redirects = 0
        while number_of_redirects < max_redirects:
            resp = self._upload(package)

            if resp.status_code == codes.OK:
                return resp
            if 500 <= resp.status_code < 600:
                number_of_redirects += 1
                print('Received "{status_code}: {reason}" Package upload '
                      'appears to have failed. Retry {retry} of 5'.format(
                          status_code=resp.status_code,
                          reason=resp.reason,
                          retry=number_of_redirects,
                      ))
            else:
                return resp

        return resp

    def package_is_uploaded(self, package, bypass_cache=False):
        """Best-effort check whether ``package`` already exists on the index.

        Only meaningful for PyPI-compatible hosts; returns False otherwise.
        """
        # NOTE(sigmavirus24): Not all indices are PyPI and pypi.io doesn't
        # have a similar interface for finding the package versions.
        if not self.url.startswith((LEGACY_PYPI, WAREHOUSE, OLD_WAREHOUSE)):
            return False

        safe_name = package.safe_name
        releases = None

        if not bypass_cache:
            releases = self._releases_json_data.get(safe_name)

        if releases is None:
            url = '{url}pypi/{package}/json'.format(package=safe_name,
                                                    url=LEGACY_PYPI)
            headers = {'Accept': 'application/json'}
            response = self.session.get(url, headers=headers)
            if response.status_code == 200:
                releases = response.json()['releases']
            else:
                # Treat any failure as "no known releases".
                releases = {}
            self._releases_json_data[safe_name] = releases

        packages = releases.get(package.metadata.version, [])
        for uploaded_package in packages:
            if uploaded_package['filename'] == package.basefilename:
                return True

        return False

    def verify_package_integrity(self, package):
        # TODO(sigmavirus24): Add a way for users to download the package and
        # check it's hash against what it has locally.
        pass
|
[
"diego.deaguilarcanellas@telefonica.com"
] |
diego.deaguilarcanellas@telefonica.com
|
902e1d1b428f38fde1ca93edfaae6cde5fc512c6
|
2e7d24351eb91ebf57d2dae23606b20661de6dd3
|
/circuitpython/frozen/Adafruit_CircuitPython_BusDevice/adafruit_bus_device/i2c_device.py
|
d2ee118dc9a7b1c65b700920e096100a65a59646
|
[
"MIT"
] |
permissive
|
BitKnitting/wakey_circuitpython
|
9978d4a437cf7d9e671a1c9b3e10aa7cb3735df9
|
44da963f7f0422f93513f8b9efcdc6b9b704dc42
|
refs/heads/master
| 2020-03-21T15:46:18.362833
| 2018-08-31T12:33:07
| 2018-08-31T12:33:07
| 138,731,763
| 3
| 2
| null | 2018-07-03T12:03:54
| 2018-06-26T12:00:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,107
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Scott Shawcroft for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_bus_device.i2c_device` - I2C Bus Device
====================================================
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BusDevice.git"
class I2CDevice:
    """Represent one device on an I2C bus and manage bus locking for it.

    Entering the object as a context manager acquires the bus lock;
    leaving it releases the lock, so transactions cannot interleave.

    :param ~busio.I2C i2c: The I2C bus the device is on
    :param int device_address: The 7 bit device address

    .. note:: This class is **NOT** built into CircuitPython. See
      :ref:`here for install instructions <bus_device_installation>`.

    Example:

    .. code-block:: python

        import busio
        from board import *
        from adafruit_bus_device.i2c_device import I2CDevice

        with busio.I2C(SCL, SDA) as i2c:
            device = I2CDevice(i2c, 0x70)
            bytes_read = bytearray(4)
            with device:
                device.readinto(bytes_read)
            # A second transaction
            with device:
                device.write(bytes_read)
    """

    def __init__(self, i2c, device_address):
        # Probe with an empty write so a missing device fails fast.
        while not i2c.try_lock():
            pass
        try:
            i2c.writeto(device_address, b'')
        except OSError:
            raise ValueError("No I2C device at address: %x" % device_address)
        finally:
            i2c.unlock()

        self.i2c = i2c
        self.device_address = device_address

    def readinto(self, buf, **kwargs):
        """Fill ``buf`` with bytes read from the device; the read length is
        ``len(buf)``.

        Pass ``start``/``end`` to restrict the region written, as if the
        buffer were sliced ``buf[start:end]`` — but without allocating a
        new buffer.

        :param bytearray buffer: buffer to write into
        :param int start: Index to start writing at
        :param int end: Index to write up to but not include
        """
        self.i2c.readfrom_into(self.device_address, buf, **kwargs)

    def write(self, buf, **kwargs):
        """Transmit the bytes from ``buf`` to the device.

        Pass ``start``/``end`` to restrict the region sent, as if the
        buffer were sliced ``buf[start:end]`` — but without allocating a
        new buffer.

        :param bytearray buffer: buffer containing the bytes to write
        :param int start: Index to start writing from
        :param int end: Index to read up to but not include
        :param bool stop: If true, output an I2C stop condition after the buffer is written
        """
        self.i2c.writeto(self.device_address, buf, **kwargs)

    def __enter__(self):
        # Spin until the bus lock is ours.
        while not self.i2c.try_lock():
            pass
        return self

    def __exit__(self, *exc):
        self.i2c.unlock()
        # Never suppress exceptions raised inside the with-block.
        return False
|
[
"farmerrobbie@freshsalad.today"
] |
farmerrobbie@freshsalad.today
|
f979fef97028dcfc003ffad3e0d58622e1a3d5c9
|
01fcf89c5a4f1c67621e07c99a6092a27e92f34e
|
/homework_sample_answer/chap11/chap_11_01.py
|
235eb1cbd7d84e732fbce991ab71320ba64ae7e5
|
[] |
no_license
|
fintalk/study-python
|
f3d86c06c4e3cdd2fcadf051ed883c78117f339e
|
62cb9a9df0493e671b5ccb7fcb14703466fab324
|
refs/heads/main
| 2023-04-21T01:18:01.356090
| 2021-05-14T03:59:13
| 2021-05-14T03:59:13
| 317,142,313
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
# P350: copy out and run the "using features of the urllib module" example
pass

# P351: read and copy out "using OrderedDict", then run it
from collections import OrderedDict

# OrderedDict preserves insertion order.
od = OrderedDict()
od["a"] = "A"
od["c"] = "C"
od["b"] = "B"
print(od)

d = {}
d["a"] = "A"
d["c"] = "C"
d["b"] = "B"
print(d)

# Delete "B" from od.
# popitem() with no arguments removes the most recently inserted pair,
# which here is ("b", "B").
od.popitem()
print(od)

# Find the official documentation for OrderedDict and record the URL below.
# https://docs.python.org/ja/3/library/collections.html?highlight=ordereddict#collections.OrderedDict

# Use the dir() function (studied in chap_06_02.md) to inspect the methods
# and attributes that od provides, and print them.
print(dir(od))

# From the methods above, find the one that returns only the values of od
# and call it.
print(od.values())

# P353: copy out and run "building a dictionary from tuples"
pass

# P354: copy out and run "using the setdefault() method"
pass

# P354: copy out and run "using defaultdict"
pass

# Find the official documentation for defaultdict and record the URL below.
# https://docs.python.org/ja/3/library/collections.html?highlight=defaultdict#collections.defaultdict

# P355: find the official documentation for the bisect module and record the URL below.
# https://docs.python.org/ja/3/library/bisect.html?highlight=bisect#module-bisect

# Sort the following list.
names = ['Emmalyn', 'Sorcha', 'Ina', 'Rennie', 'Blinni']
names.sort()  # destructive: sorts the list in place
print(names)

# Insert "Taro" into the sorted names using insort_left (P355).  Note that
# insort_left lives in the bisect module, so it must be imported first —
# compare with how OrderedDict was imported above.
import bisect
bisect.insort_left(names,"Taro")
print(names)

# Insert "Hanako" into the sorted names using insort_left (P355).
bisect.insort_left(names,"Hanako")
print(names)

# Insert "shinseitaro" into the sorted names using insort (P355).
bisect.insort(names,"shinseitaro")
print(names)
|
[
"shinsei.all@gmail.com"
] |
shinsei.all@gmail.com
|
9c9bcfe777119a1657627881597b927da34239e2
|
24cf6d01fc9485c2e5578523bce6313aab47a30e
|
/DataLoaders/Flat_DataLoader.py
|
8118ed234c95d2e2ce6c7b176ab4bd64ba986bd0
|
[] |
no_license
|
sahahn/GenDiagFramework
|
352212b2c540a6db73e810e416a9d3d4fa84f95a
|
29498d3667d644d5b3a8fd0f0e277cbdd14027ba
|
refs/heads/master
| 2020-04-14T06:10:10.609366
| 2019-06-18T20:22:31
| 2019-06-18T20:22:31
| 163,678,894
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,328
|
py
|
from DataLoaders.IQ_DataLoader import IQ_DataLoader
import os
import numpy as np
def load_map(dr, name, ind):
    """Load one map index for both hemispheres of a subject.

    Reads ``<dr><name>-lh.npy`` and ``<dr><name>-rh.npy``, selects slice
    ``ind`` along the last axis of each, and stacks left/right as a new
    trailing axis.
    """
    hemispheres = [
        np.load(dr + name + suffix)[:, :, ind]
        for suffix in ('-lh.npy', '-rh.npy')
    ]
    return np.stack(hemispheres, axis=-1)
class Flat_DataLoader(IQ_DataLoader):
    ''' Input size refers to inds to select because Im a bad person'''

    def load_data(self):
        # Keep only subject IDs that have both hemisphere files on disk
        # and a known IQ label in self.iq_dict.
        all_names = os.listdir(self.init_location)
        all_names = [name for name in all_names if 'NDAR' in name]
        names = [name.split('-')[0] for name in all_names]
        names = list(set([name for name in names if name + '-rh.npy' in all_names and
                     name + '-lh.npy' in all_names and
                     name in self.iq_dict]))
        # NOTE(review): set() discards ordering, so names[:500] below is an
        # arbitrary, run-dependent sample of subjects — confirm whether the
        # normalization statistics were meant to be deterministic.

        # self.input_size is (ab)used as the list of map indices to load.
        map_inds = self.input_size

        if self.preloaded == False:
            # Per map index: [mean, max-after-centering, imax, imin]
            # estimated from up to 500 subjects.
            s_norm_info = []

            for ind in map_inds:
                data = np.array([load_map(self.init_location, name, ind) for name in names[:500]])
                data_mean = np.mean(data)
                data -= data_mean
                data_max = np.max(data)
                data /= data_max
                # Extremes of the centered-and-scaled sample, used for a
                # final min-max rescaling of each subject below.
                imax = np.max(data)
                imin = np.min(data)
                s_norm_info.append([data_mean, data_max, imax, imin])

        for name in names:
            if (len(self.data_points) < self.limit) and (name in self.iq_dict):
                label = self.iq_dict[name]
                dp = self.create_data_point(name, label)

                if self.preloaded == False:
                    all_maps = []

                    for i in range(len(map_inds)):
                        data = load_map(self.init_location, name, map_inds[i])
                        data = data.astype('float32')
                        # Apply the sample statistics in the same order they
                        # were computed: center, scale, then min-max rescale.
                        data -= s_norm_info[i][0]
                        data /= s_norm_info[i][1]
                        data -= s_norm_info[i][3]
                        data /= (s_norm_info[i][2] - s_norm_info[i][3])
                        all_maps.append(data)

                    # Concatenate the per-index maps along the channel axis.
                    all_maps = np.concatenate(all_maps, axis=-1)
                    dp.set_data(all_maps)

                self.data_points.append(dp)
|
[
"sahahn@uvm.edu"
] |
sahahn@uvm.edu
|
81c41286026d665cd68a617771359ef7abdc7b15
|
0bcd3d105e53286a1f9970dfef1d92c2fb67d738
|
/tutorial/views.py
|
1ee1c6df0dcde5662f94be624ea3851fd672e239
|
[] |
no_license
|
billy0402/django-todo-list
|
b2b9c74f4152d835521cdf9d088a0f79cb10a246
|
0ee51612ea9a91d866055724f4c325de8568b96d
|
refs/heads/master
| 2022-03-20T15:55:03.911331
| 2019-09-28T01:08:39
| 2019-10-05T15:54:04
| 211,426,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def root(request):
    """Respond with a static Hello World heading."""
    markup = '<h1>Hello World</h1>'
    return HttpResponse(markup)
def request_test(request):
    """Echo the s1/s2 query-string parameters back as an <h1> heading.

    Example: http://127.0.0.1:8000/tutorial/request_test/?s1=Hello&s2=World
    """
    first = request.GET.get('s1')
    second = request.GET.get('s2')
    body = '<h1>{} {}</h1>'.format(first, second)
    return HttpResponse(body)
def render_test(request, n1, n2):
    """Add the two URL-captured numbers and render the sum."""
    total = n1 + n2
    context = {'score': total}
    return render(request, 'render_test.html', context)
def homework(request, start, stop, base):
    """Render the numbers between start and stop (inclusive) that match the
    parity selected by base: 1 keeps odd numbers, anything else keeps even.

    When start > stop the range is walked in descending order.
    """
    descending = start > stop
    if descending:
        start, stop = stop, start

    sequence = range(start, stop + 1)
    if descending:
        sequence = reversed(sequence)

    wanted_remainder = 1 if base == 1 else 0
    num_array = [num for num in sequence if num % 2 == wanted_remainder]

    return render(request, 'homework.html', {
        'num_array': num_array
    })
|
[
"10646003@ntub.edu.tw"
] |
10646003@ntub.edu.tw
|
de55d3bb9bde3646fcaf7ebfc3094bcb62b0b6c3
|
2357a471980f925ba58a9045868ec2cc9ad2a7c7
|
/1haha.py
|
0224529ebfc4ce78344f3e945f1502d88015f02e
|
[] |
no_license
|
yyyuaaaan/pythonfirst
|
ec0a63eebf0ec753caf6119ce5c21178868f0135
|
8374e85646b0025be2689e96be2b3cbf6ac15109
|
refs/heads/master
| 2021-01-10T01:40:49.284641
| 2017-01-12T17:14:58
| 2017-01-12T17:14:58
| 52,094,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,858
|
py
|
"""
不要在iterate list 的时候改变它的值,会出错,要clone这个list再做操作。
for e1 in L1[:]: #在iterate的时候,拷贝一下list
L1=[1, 2, 3, 4]
L2=[1,2,5,6]
class BinaryTreeNode: # this will not work,python do not suppurt pointer, and recursion
def __init__(self,value=0):
self.value = value
self.left = BinaryTreeNode()
self.right = BinaryTreeNode()
stable sorting is good, donot exchange equal keys,
Sorting by insertion
Sorting by selection
Sorting by exchange
Unit test:会用stubs, 当软件还不存在,simulate来测试
L.sort 改变了list, sorted不改变list,return一个新的list
如果对dict 做sorted返回一个keys的list,如果用dict.sort,会出exception,因为dict没有sort
Some general rules of testing:
A testing unit should focus on one tiny bit of functionality and prove it correct.
Each test unit must be fully independent. Each of them must be able to run alone, and also within the test
suite, regardless of the order they are called. The implication of this rule is that each test must be loaded
with a fresh dataset and may have to do some cleanup afterwards. This is usually handled by setUp() and
tearDown() methods.
Try hard to make tests that run fast. If one single test needs more than a few millisecond to run, development
will be slowed down or the tests will not be run as often as desirable. In some cases, tests can’t be fast
because they need a complex data structure to work on, and this data structure must be loaded every time
the test runs. Keep these heavier tests in a separate test suite that is run by some scheduled task, and
run all other tests as often as needed.
Learn your tools and learn how to run a single test or a test case. Then, when developing a function inside
a module, run this function’s tests very often, ideally automatically when you save the code.
Always run the full test suite before a coding session, and run it again after. This will give you more
confidence that you did not break anything in the rest of the code.
It is a good idea to implement a hook that runs all tests before pushing code to a shared repository.
The first step when you are debugging your code is to write a new test pinpointing the bug. While it is not
always possible to do, those bug catching test are among the most valuable pieces of code in your project.
Use long and descriptive names for testing functions.
Regression testing
The intent of regression testing is to ensure that a change such as those mentioned above has not introduced
new faults.[1] One of the main reasons for regression testing is to determine whether a change in one part
of the software affects other parts of the software.[2]
Unit Test Libraries
The reasons why I choose and continue to use py.test are the simple test collection, the lack of boilerplate
and the ability to define set up and tear down functions at test, class or module level. For example for this
function:
def parse_connection(connection_string):
pass
Whilst the same test in py.test is more simple:
from parse_conn import parse_connection
import py.test
def test_not_at():
py.test.raises(ValueError, parse_connection, 'invalid uri')
"""
import py.test
#One more feature of py.test that is really useful is the ability to run all the tests in a subdirectory.
def test_div_zero():
    # py.test.raises with a string argument evaluates the expression and
    # expects it to raise the given exception type.
    py.test.raises(ZeroDivisionError, "1/0")


def func(x):
    # Tiny helper exercised by test_answer below.
    return x + 1


def test_answer():
    # NOTE(review): func(3) == 4, so this assertion fails on purpose — it
    # reproduces the deliberately-failing example from the pytest tutorial.
    assert func(3) == 5


def test_simple():
    assert "42 is the answer" == str(42) + " " + "is the answer"


def test_multiply():
    assert 42 == 6 * 7


def test_ord():
    # ord('a') == 97 and ord('b') == 98, so the +1 offset holds.
    assert ord('a') + 1 == ord('b')
if __name__ == '__main__':
    # Fixed: the module imported `py.test`, so the bare name `pytest` was
    # never bound and `pytest.main()` raised NameError.  Import it here.
    import pytest
    pytest.main()
def bsearch(l, value):
    """Return the index of `value` in the sorted list `l`, or -1 if absent.

    Classic iterative binary search over the inclusive range [lo, hi];
    an empty list falls straight through to -1.
    """
    lo, hi = 0, len(l) - 1
    while lo <= hi:
        # // keeps mid an int under Python 3 (plain / produced a float,
        # which is not a valid list index).  Same result under Python 2.
        mid = (lo + hi) // 2
        if l[mid] < value:
            lo = mid + 1
        elif value < l[mid]:
            hi = mid - 1
        else:
            return mid
    return -1
# Demo call.  Note the Python 2 print statement: this file predates / does
# not run under Python 3.  7 is absent from the list, so this prints -1.
a = [1, 2, 3, 5, 9, 11, 15, 66]
print bsearch(a,7)
def merge_sort(A):
    """Return a new sorted list with the elements of A (top-down merge sort).

    Lists with fewer than two elements are returned as-is.  The original
    base case (`n == 1`) never terminated for an empty list: merge_sort([])
    recursed on A[:0] forever.
    """
    n = len(A)
    if n <= 1:
        return A
    mid = n // 2  # floor division
    L = merge_sort(A[:mid])
    R = merge_sort(A[mid:])
    return merge(L, R)


def merge(L, R):
    """Merge two sorted sequences L and R into one sorted list.

    Stable: on ties the element from L is taken first.
    """
    i = 0
    j = 0
    answer = []
    while i < len(L) and j < len(R):
        if L[i] < R[j]:
            answer.append(L[i])
            i += 1
        else:
            answer.append(R[j])
            j += 1
    # Append whichever side still has elements left.
    if i < len(L):
        answer.extend(L[i:])
    if j < len(R):
        answer.extend(R[j:])
    return answer
|
[
"yyyuaaaan@gmail.com"
] |
yyyuaaaan@gmail.com
|
b50ea4f921725dc555649786318d9db6b0221eed
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/surface/vmware/private_clouds/management_dns_zone_bindings/create.py
|
0a1b65fdb80eb05ca00517f4a46682fb1e7a6968
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,829
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'vmware private-clouds management-dns-zone-bindings create' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.vmware.managementdnszonebinding import ManagementDNSZoneBindingClient
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.vmware import flags
from googlecloudsdk.core import log
DETAILED_HELP = {
'DESCRIPTION':
"""
Create a management DNS zone binding.
""",
'EXAMPLES':
"""
To create a management DNS zone binding called `my-mgmt-dns-zone-binding` that corresponds to the vmware engine network `sample-vmware-engine-network` in private cloud
`my-private-cloud`, in location `us-east2-b`, run:
$ {command} my-mgmt-dns-zone-binding --project=my-project --private-cloud=my-private-cloud --location=us-east2-b --vmware-engine-network=sample-vmware-engine-network
Or:
$ {command} my-mgmt-dns-zone-binding --private-cloud=my-private-cloud --vmware-engine-network=sample-vmware-engine-network
In the second example, the project and location are taken from gcloud properties `core/project` and `compute/zone` respectively.
""",
}
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
  """Create a management DNS zone binding."""

  detailed_help = DETAILED_HELP

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    flags.AddManagementDnsZoneBindingArgToParser(parser)
    base.ASYNC_FLAG.AddToParser(parser)
    # Creation runs asynchronously unless the user opts out with --no-async.
    base.ASYNC_FLAG.SetDefault(parser, True)
    parser.display_info.AddFormat('yaml')
    # Exactly one target network type must be supplied.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '--vpc-network',
        required=False,
        help="""\
        Resource name of the Google Cloud VPC network to bind to the management DNS zone of the private cloud.
        """)
    group.add_argument(
        '--vmware-engine-network',
        required=False,
        help="""\
        Resource name of VMware Engine network to bind to the management DNS zone of the private cloud.
        """)
    parser.add_argument(
        '--description',
        help="""\
        Text describing the binding resource that represents the network getting bound to the management DNS zone.
        """)

  def Run(self, args):
    """Issue the create request; return the resource, or None when async."""
    mdzb = args.CONCEPTS.management_dns_zone_binding.Parse()
    client = ManagementDNSZoneBindingClient()
    is_async = args.async_
    operation = client.Create(
        mdzb,
        vpc_network=args.vpc_network,
        vmware_engine_network=args.vmware_engine_network,
        description=args.description,
    )
    if is_async:
      # Fire-and-forget: report the operation name and return immediately.
      log.CreatedResource(
          operation.name, kind='management DNS zone binding', is_async=True)
      return
    # Synchronous path: block until the long-running operation finishes.
    resource = client.WaitForOperation(
        operation_ref=client.GetOperationRef(operation),
        message=('waiting for management DNS zone binding [{}] ' +
                 'to be created').format(mdzb.RelativeName()))
    log.CreatedResource(mdzb.RelativeName(), kind='management DNS zone binding')
    return resource
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
18275199fd9c26d3212b785f9a9f49ab7414ab26
|
79c8b6f17f22dd6b72b7ab228e38539797b3d1e9
|
/common/access/shortcuts.py
|
146b418bcf109b49d1a4c7e022692711308aac33
|
[] |
no_license
|
wd5/system
|
ac81e76413c620a225fdaff335f5c59f6ebf5bd0
|
26d8453965598e5b28bf2178c5cd01e637ac89b7
|
refs/heads/master
| 2021-01-17T22:07:50.130502
| 2013-01-22T14:37:08
| 2013-01-22T14:37:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import Group
from guardian.shortcuts import get_groups_with_perms, assign, remove_perm, get_perms_for_model
def check_perm_for_model(perm_name, model):
    """Return True if `model` declares a permission with codename `perm_name`."""
    codenames = (perm.codename for perm in get_perms_for_model(model))
    return perm_name in codenames
def assign_perm_for_groups_id(perm_name, object, groups_ids):
    """Grant `perm_name` on `object` to every group whose pk is in `groups_ids`."""
    for grp in Group.objects.filter(pk__in=groups_ids):
        assign(perm_name, grp, object)
def remove_perm_for_groups_id(perm_name, object, groups_ids):
    """Revoke `perm_name` on `object` from every group whose pk is in `groups_ids`."""
    for grp in Group.objects.filter(pk__in=groups_ids):
        remove_perm(perm_name, grp, object)
def get_group_ids_for_object_perm(perm_name, object):
    """Return the ids (as unicode strings) of the groups that hold
    `perm_name` on `object`.  Python 2 code: uses iteritems()/unicode."""
    groups_dict = get_groups_with_perms(object, attach_perms=True)
    return [unicode(group.id)
            for (group, perms) in groups_dict.iteritems()
            if perm_name in perms]
def edit_group_perms_for_object(perm_name, object, old_perm_groups_ids, new_perm_groups_ids):
    """Synchronize group permissions on `object`: revoke `perm_name` from
    groups dropped from the new id list and grant it to groups added."""
    ids_to_remove = [gid for gid in old_perm_groups_ids
                     if gid not in new_perm_groups_ids]
    if ids_to_remove:
        remove_perm_for_groups_id(perm_name, object, ids_to_remove)

    ids_to_assign = [gid for gid in new_perm_groups_ids
                     if gid not in old_perm_groups_ids]
    # Mirrors the original: assign is called even when the list is empty.
    assign_perm_for_groups_id(perm_name, object, ids_to_assign)
|
[
"dostovalov@gmail.com"
] |
dostovalov@gmail.com
|
fc07cc8aa332da4535122a8f44ad78050787f822
|
bd5b3934969ebf4f693ceb4be17a68f9c3ebd414
|
/beginPython/ch09/queen2.py
|
0496019e7f12164f89f9d5fb28ddc7b2fe3a2ec6
|
[] |
no_license
|
lyk4411/untitled
|
bc46863d3bbb2b71edf13947f24b892c2cf43e1a
|
875b7dfa765ffa40d76582d2ae41813d2e15c8bd
|
refs/heads/master
| 2021-04-06T09:09:08.977227
| 2021-03-10T02:56:34
| 2021-03-10T02:56:34
| 124,990,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
def conflict(state, nextx):
    """Return True if placing the next queen at column `nextx` conflicts
    with the queens already placed.

    `state` is a tuple where state[i] is the column of the queen in row i;
    the next queen goes in row len(state).
    """
    nexty = len(state)
    for i in range(nexty):
        # Conflict when the columns match (difference 0) or the two queens
        # share a diagonal (column difference equals row difference).
        if abs(state[i] - nextx) in (0, nexty - i):
            return True
    return False


def queens(num=8, state=()):
    """Yield all solutions to the `num`-queens problem.

    Each solution is a tuple of column positions, one entry per row,
    extending the partial placement in `state`.  The debug print
    statements that used to trace every recursive step (and polluted
    stdout on every call) have been removed.
    """
    for pos in range(num):
        if not conflict(state, pos):
            if len(state) == num - 1:
                # Last row: this single position completes a solution.
                yield (pos,)
            else:
                # Recurse with the position appended, prefixing each
                # sub-solution with this row's column.
                for result in queens(num, state + (pos,)):
                    yield (pos,) + result


print(list(queens(4)))
|
[
"moneyflying_2006@hotmail.com"
] |
moneyflying_2006@hotmail.com
|
06f83917b5ba4e8ace83e5b1da0fa8851fcb74f7
|
c83473c2f9b63429f40e8a4806ab49305815c81d
|
/introduction/basic_version_celcius.py
|
69e0eb4e50b6eb3724bf74838d7c3f1fd6c76375
|
[] |
no_license
|
pelinbalci/machinelearning
|
f8f84cda07a2ae87f23598188a6c148badb6e15f
|
33e9786ea49f114c24c02dbf24e33434d0421f65
|
refs/heads/master
| 2022-11-15T19:55:46.633659
| 2020-07-05T18:38:54
| 2020-07-05T18:38:54
| 273,779,533
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
# https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l02c01_celsius_to_fahrenheit.ipynb
# Fit a single-neuron linear model that learns the Celsius -> Fahrenheit
# conversion (F = 1.8 * C + 32) from seven example pairs.
import numpy as np
import tensorflow as tf

# Training pairs: fahrenheit_a[i] is the conversion of celsius_q[i].
celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)

learning_rate = 0.1

# A single Dense unit with one scalar input: y = w * x + b.
define_layers = tf.keras.layers.Dense(units=1, input_shape=[1])
model = tf.keras.Sequential([define_layers])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(learning_rate))
history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)

# Should print approximately 212, the true conversion of 100 degrees C.
print(model.predict([100]))
|
[
"balci.pelin@gmail.com"
] |
balci.pelin@gmail.com
|
7f1db2986a6ad0c55e96979360f37996b7a2233f
|
b66985c330740d191b009abc46ff042c664eb3f6
|
/sporthub/settings.py
|
2d3b8bfce760454890cba8a644586e1aac1b09aa
|
[] |
no_license
|
kalyevb/footbal_pole
|
fee9fc4e082ca6b7bbb779b52aad69f8c4a3620b
|
349b528452369759fa570d36ae3489bfd9609f64
|
refs/heads/master
| 2022-05-09T13:30:12.493178
| 2019-12-17T15:52:51
| 2019-12-17T15:52:51
| 228,650,159
| 0
| 0
| null | 2022-04-22T22:57:22
| 2019-12-17T15:50:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,303
|
py
|
"""
Django settings for sporthub project.
Generated by 'django-admin startproject' using Django 1.11.26.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qu40=oz3p%l0ej-7-b+9*7+@1^)y&fb1e3k@+@-^x^pf!wo(#x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['mysite.com', 'localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'account',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'fieldsapp',
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sporthub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sporthub.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Bishkek'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS =[
os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.google.GoogleOAuth2',
]
SOCIAL_AUTH_FACEBOOK_KEY = '2154714614837357'
SOCIAL_AUTH_FACEBOOK_SECRET = 'c0f33b00efc22d24cd5c269314974cc9'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_TWITTER_KEY = 'XXX' # Twitter Consumer Key
SOCIAL_AUTH_TWITTER_SECRET = 'XXX' # Twitter Consumer Secret
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '11457369489-atfkrl6s5pg0je1lfjee7bb6hob62kg9.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'yzT41pSaii4cnv2H-jla_JQD'
|
[
"tip.beka99@gmail.com"
] |
tip.beka99@gmail.com
|
1711f9fe6583e0525560bad95419872022ed4f1f
|
879e06ea72c26dc4e7647fbd9e6eedb6bd3f0129
|
/PKD/ch01/co11p224.py
|
1d6d70bc9e80923c0205926e0e8d1f08ee862d04
|
[] |
no_license
|
IEP/submissions
|
6b27f1ef5518ce7ebc60525b2e2e237c4a6bebec
|
235aceee8bc48395b7fea25d00344554b22144f6
|
refs/heads/master
| 2022-04-15T09:08:48.391941
| 2020-04-05T15:08:16
| 2020-04-05T15:08:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
#!/bin/python
# Reads two strings and appears to check whether `a` can be obtained from
# `b` by inserting exactly one character — TODO confirm against the problem
# statement.  The output messages are Indonesian: "Tentu saja bisa!" means
# "Of course it can!" and "Wah, tidak bisa :(" means "Oh, it can't :(".
a = input()
b = input()
flag = False  # becomes True once the single allowed mismatch has been used
o = 'Tentu saja bisa!'
if len(a) == len(b)+1:
    # Pad b so indexing b[i] is safe for every position of a.
    b += ' '
    for i in range(len(a)):
        # Each character of a must match b either at the same index or at
        # the previous index (i.e. shifted by the one inserted character).
        if not (a[i] == b[i] or a[i] == b[i-1]):
            if not flag:
                flag = True
            else:
                o = 'Wah, tidak bisa :('
else:
    # Lengths don't differ by exactly one, so a single insertion can't work.
    o = 'Wah, tidak bisa :('
print(o)
|
[
"ivan.ega.p@gmail.com"
] |
ivan.ega.p@gmail.com
|
643c167da61137f80baf505c741c81ef55235688
|
91075a58261b9858b6bba98968eca7f486175891
|
/62_quickWeather.py
|
1fa7bdcf712264adf01301da1841571191232f76
|
[] |
no_license
|
bingo8670/automat_the_boring_stuff_with_python
|
4b144cfd58dd25ffd57bab205719d13c103320e3
|
493da9438ae7053d6140b67c53fcba95fe3deb18
|
refs/heads/master
| 2020-03-25T23:57:54.305785
| 2018-12-14T09:07:21
| 2018-12-14T09:07:21
| 144,301,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
#! python3
# quickWeather.py - Prints the weather for a location from the command line.

import json, requests, sys

# Compute location from command line arguments.
if len(sys.argv) < 2:
    print('Usage: quickWeather.py location')
    sys.exit()
location = ' '.join(sys.argv[1:])

# Download the JSON data from OpenWeatherMap.org's API.
# NOTE(review): this endpoint presumably also requires an APPID API-key
# parameter nowadays — verify against the OpenWeatherMap documentation.
url = 'http://api.openweathermap.org/data/2.5/forecast/daily?q=%s&cnt=3' % (location)
response = requests.get(url)
response.raise_for_status()

# Load JSON data into a Python variable.
# Fixed: this assignment had been swallowed into the comment above, so
# weatherData was never defined and the line below raised NameError.
weatherData = json.loads(response.text)

# Print weather descriptions.
w = weatherData['list']
print('Current weather in %s:' % (location))
print(w[0]['weather'][0]['main'], '-', w[0]['weather'][0]['description'])
print()
print('Tomorrow:')
print(w[1]['weather'][0]['main'], '-', w[1]['weather'][0]['description'])
print()
print('Day after tomorrow:')
print(w[2]['weather'][0]['main'], '-', w[2]['weather'][0]['description'])
|
[
"307286130@qq.com"
] |
307286130@qq.com
|
796c928dadf5c6278d15c714e6bd6781ff12818f
|
81c8baf31e15cf132b22cc489e7c8fc7b86003a4
|
/linuxos/context_processors.py
|
4ffb841705a623f5e4eac674d4a08e6f88c10080
|
[
"MIT"
] |
permissive
|
LinuxOSsk/Shakal-NG
|
0b0030af95a8dad4b120ae076920aa3a4020c125
|
93631496637cd3847c1f4afd91a9881cafb0ad83
|
refs/heads/master
| 2023-09-04T04:27:05.481496
| 2023-08-30T04:10:41
| 2023-08-30T04:10:41
| 2,168,932
| 11
| 8
|
MIT
| 2023-08-16T03:34:02
| 2011-08-07T14:36:25
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
# -*- coding: utf-8 -*-
from django.conf import settings as django_settings
def settings(request):
    """Context processor exposing anonymous-access feature flags to templates.

    Returns the ANONYMOUS_* switches from the project settings so templates
    can toggle UI elements for unauthenticated users.
    """
    flag_names = ('ANONYMOUS_COMMENTS', 'ANONYMOUS_NEWS', 'ANONYMOUS_TOPIC')
    return {name: getattr(django_settings, name) for name in flag_names}
|
[
"miroslav.bendik@gmail.com"
] |
miroslav.bendik@gmail.com
|
5348f68bdd375eb497ae98d8ed944d330932b35f
|
597ed154876611a3d65ca346574f4696259d6e27
|
/dbaas/account/forms/user.py
|
05ce9c8dfc4d9ad444b5f31238c03b90d4befc9e
|
[] |
permissive
|
soitun/database-as-a-service
|
41984d6d2177734b57d726cd3cca7cf0d8c5f5d6
|
1282a46a9437ba6d47c467f315b5b6a3ac0af4fa
|
refs/heads/master
| 2023-06-24T17:04:49.523596
| 2018-03-15T19:35:10
| 2018-03-15T19:35:10
| 128,066,738
| 0
| 0
|
BSD-3-Clause
| 2022-05-10T22:39:58
| 2018-04-04T13:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django import forms
LOG = logging.getLogger(__name__)
def _username_regex_field():
    """Build the shared 100-character username RegexField.

    Both the change and creation forms previously duplicated this
    construction verbatim; it is factored out so the username policy is
    defined in exactly one place.
    """
    return forms.RegexField(
        label=_("Username"), max_length=100, regex=r'^[\w.@+-]+$',
        help_text=_("Required. 100 characters or fewer. Letters, digits and "
                    "@/./+/-/_ only."),
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")
        }
    )


class CustomUserChangeForm(UserChangeForm):
    """UserChangeForm variant allowing usernames up to 100 characters."""

    def __init__(self, *args, **kwargs):
        super(CustomUserChangeForm, self).__init__(*args, **kwargs)
        # Replace the default username field with the relaxed 100-char one.
        self.fields['username'] = _username_regex_field()


class CustomUserCreationForm(UserCreationForm):
    """UserCreationForm variant allowing usernames up to 100 characters."""

    def __init__(self, *args, **kwargs):
        super(CustomUserCreationForm, self).__init__(*args, **kwargs)
        # Replace the default username field with the relaxed 100-char one.
        self.fields['username'] = _username_regex_field()
|
[
"raposo.felippe@gmail.com"
] |
raposo.felippe@gmail.com
|
818277c61c410adf352a1317283a04d8cb8c17ac
|
d5328a2837883aaccdae8f7367cc4787ae70e070
|
/Processors/DataProcessor.py
|
c12c65b609679d60ce974a260dfb182a22e28a8a
|
[] |
no_license
|
FishRedLeaf/Bert-TextClassification
|
3845e09dc1b25e66c7cfeffb3ce14bbd22dcef15
|
b9da0b7a1f1964b482cfbedcad913498ffe7feb9
|
refs/heads/master
| 2020-05-28T09:33:11.638594
| 2019-05-24T10:19:40
| 2019-05-24T10:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
# coding=utf-8
import csv
import sys
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists."""
        rows = []
        with open(input_file, "r", encoding='utf-8') as handle:
            for row in csv.reader(handle, delimiter="\t", quotechar=quotechar):
                if sys.version_info[0] == 2:
                    # Legacy Python 2 path: decode every cell to unicode.
                    row = [unicode(cell, 'utf-8') for cell in row]
                rows.append(row)
        return rows
|
[
"18310523922@163.com"
] |
18310523922@163.com
|
44fe4c04c6ddd445c93ccb8e9984814ada833c22
|
f0a62605171bc62eb68dd884c77cf146657ec5cb
|
/library/f5bigip_net_vlan_interface.py
|
bb60660e07979ad7089d4656178a45cb28f870a6
|
[
"Apache-2.0"
] |
permissive
|
erjac77/ansible-role-f5
|
dd5cc32c4cc4c79d6eba669269e0d6e978314d66
|
c45b5d9d5f34a8ac6d19ded836d0a6b7ee7f8056
|
refs/heads/master
| 2020-04-06T08:13:14.095083
| 2020-02-16T23:44:13
| 2020-02-16T23:44:13
| 240,129,047
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,146
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: f5bigip_net_vlan_interface
short_description: BIG-IP net vlan interface module
description:
- Configures a tagged or untagged interface and trunk for a VLAN.
version_added: "1.0.0" # of erjac77.f5 role
author:
- "Eric Jacob (@erjac77)"
options:
tag_mode:
description:
- Specifies the tag mode of the interface or trunk associated with.
choices: ['customer', 'service', 'double', 'none']
tagged:
description:
- Specifies the type of the interface.
choices: ['true', 'false']
untagged:
description:
- Specifies the type of the interface.
choices: ['true', 'false']
vlan:
description:
- Specifies the vlan in which the interface belongs.
required: true
extends_documentation_fragment:
- f5_common
- f5_app_service
- f5_name
- f5_partition
- f5_state
"""
EXAMPLES = """
- name: Add NET VLAN Interface
f5bigip_net_vlan_interface:
provider:
server: "{{ ansible_host }}"
server_port: "{{ http_port | default(443) }}"
user: "{{ http_user }}"
password: "{{ http_pass }}"
validate_certs: false
name: 1.1
untagged: true
tag_mode: none
vlan: /Common/internal
state: present
delegate_to: localhost
"""
RETURN = """ # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.erjac77.network.f5.common import F5_NAMED_OBJ_ARGS
from ansible.module_utils.erjac77.network.f5.common import F5_PROVIDER_ARGS
from ansible.module_utils.erjac77.network.f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Describes the AnsibleModule argument specification for this module."""

    @property
    def argument_spec(self):
        """Full argument spec: module-specific options plus shared F5 args."""
        spec = dict(
            tag_mode=dict(
                type="str", choices=["customer", "service", "double", "none"]
            ),
            tagged=dict(type="bool"),
            untagged=dict(type="bool"),
            vlan=dict(type="str", required=True),
        )
        spec.update(F5_PROVIDER_ARGS)
        spec.update(F5_NAMED_OBJ_ARGS)
        # VLAN interfaces are not partitioned objects, so drop 'partition'.
        del spec["partition"]
        return spec

    @property
    def supports_check_mode(self):
        """This module supports Ansible check mode."""
        return True

    @property
    def mutually_exclusive(self):
        """An interface is either tagged or untagged, never both."""
        return [["tagged", "untagged"]]
class F5BigIpNetVlanInterface(F5BigIpNamedObject):
    """Maps CRUD operations onto the interfaces collection of a parent VLAN."""

    def _set_crud_methods(self):
        # Load the parent VLAN first; interfaces live under it in the REST tree.
        vlan = self._api.tm.net.vlans.vlan.load(
            **self._get_resource_id_from_path(self._params["vlan"])
        )
        self._methods = {
            "create": vlan.interfaces_s.interfaces.create,
            "read": vlan.interfaces_s.interfaces.load,
            "update": vlan.interfaces_s.interfaces.update,
            "delete": vlan.interfaces_s.interfaces.delete,
            "exists": vlan.interfaces_s.interfaces.exists,
        }
        # 'vlan' only identifies the parent; drop it so it is not sent as a
        # property of the interface resource itself.
        del self._params["vlan"]
def main():
    """Module entry point: build the AnsibleModule and run the CRUD flush."""
    params = ModuleParams()
    module = AnsibleModule(
        argument_spec=params.argument_spec,
        supports_check_mode=params.supports_check_mode,
        mutually_exclusive=params.mutually_exclusive,
    )
    try:
        # flush() reconciles the desired state against the device and returns
        # the result dict expected by exit_json.
        obj = F5BigIpNetVlanInterface(check_mode=module.check_mode, **module.params)
        result = obj.flush()
        module.exit_json(**result)
    except Exception as exc:
        # Surface any failure through Ansible's standard failure channel.
        module.fail_json(msg=str(exc))
if __name__ == "__main__":
    main()
|
[
"erjac77@gmail.com"
] |
erjac77@gmail.com
|
1d1723243121552343f803b0adc17d95025ac667
|
11aaeaeb55d587a950456fd1480063e1aed1d9e5
|
/.history/ex45-test_20190612192241.py
|
37276da8d14d02df67279a69ce50e3a8905fe3b3
|
[] |
no_license
|
Gr4cchus/Learn-Python-3-The-Hard-Way
|
8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8
|
f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d
|
refs/heads/master
| 2020-05-17T23:18:29.483160
| 2019-06-26T18:42:52
| 2019-06-26T18:42:52
| 184,023,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
class Scenes(object):
    """Minimal text-adventure scene dispatcher over a fixed three-room map."""

    # Ordered list of reachable location names shown to the player.
    map_list = [
        'room1',
        'room2',
        'finish'
    ]

    def start(self):
        """Entry scene: greet the player and list the destinations."""
        print("You are at the start")
        print("Where would you like to go")
        self.locations()

    def room1(self):
        print("You enter room 1")
        print("Where would you like to go")

    def room2(self):
        print("You enter room 2")
        print("Where would you like to go")

    def finish(self):
        """Terminal scene: end the program."""
        print("You have finished")
        exit(0)

    def locations(self):
        """Print the map followed by each location name on its own line.

        BUG FIX: the original line `print("defself.map_list)` contained an
        unterminated string literal and made the whole file a SyntaxError.
        """
        print(self.map_list)
        for name in self.map_list:
            print(name)
# class Locations(Scenes):
# pass
# def map(self):
# dict_locations = {
# 'room1': room1(),
# 'room2': room2()
# }
# return dict_locations
# class Engine():
# def __init__(self, map):
# self.map = map
# def play(self):
# while True:
# # a = self.map.dict_locations
# print('yes')
# Game driver: show the start scene, then read one destination from the player.
thescenes = Scenes()
thescenes.start()
action = input("> ")
# BUG FIX: `thescenes.locations` is a bound method, so the original lookup
# `thescenes.locations.map_list` raised AttributeError; the map list lives on
# the Scenes instance/class itself.
if action in thescenes.map_list:
    print("success")
|
[
"ahivent@gmail.com"
] |
ahivent@gmail.com
|
d906481dbbceaebae4e4370ca5579524e678f153
|
9d9e0a269aca2280e841a083a5e10dc24a0eb14d
|
/build/rosserial/rosserial_mbed/catkin_generated/pkg.installspace.context.pc.py
|
6241f97f8af16ab6afe029d36c62ce4e12ddfe1e
|
[] |
no_license
|
JoseBalbuena181096/ROS_TUTORIAL
|
9e4eb8116366f5c4e449afe2d16a6954fa9c9b05
|
caba9b86f8456a660e8256abb8b75f45ed3b6dd7
|
refs/heads/master
| 2020-06-27T12:25:24.553275
| 2019-08-01T01:34:54
| 2019-08-01T01:34:54
| 199,954,345
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated pkg-config context for the rosserial_mbed package —
# edit the catkin template, not this file.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/jose/catkin_ws/install/include".split(';') if "/home/jose/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_mbed"
PROJECT_SPACE_DIR = "/home/jose/catkin_ws/install"
PROJECT_VERSION = "0.8.0"
|
[
"angelsnek2510@gmail.com"
] |
angelsnek2510@gmail.com
|
a01b5418aa330eafea4e8e610fc1e9de8a1fa572
|
9ca9226e11162d39dd5ef2db23b853bfac80020b
|
/pages/tuozhen_NewsdetailPage.py
|
ac64187510e909cac772d477e4c490ff91729503
|
[] |
no_license
|
chenshl/py_app_autoTest
|
ede64a5f380926aa191e9f1ba86955eb6bd3725e
|
5957112bf20f577ed71d851e73e1e1ce1800ff79
|
refs/heads/master
| 2020-03-12T11:02:51.630080
| 2018-04-29T08:49:39
| 2018-04-29T08:49:39
| 130,587,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
#!/usr/bin/python
# coding:utf-8
# @author : csl
# @date : 2018/04/29 09:04
# News detail page object (page-object pattern for Appium UI tests).
from appium import webdriver
from base.Element import Element
class NewsdetailPage(Element):
    # XPath locator for the comment input box (the Chinese text is UI copy).
    comment = "//android.widget.EditText[contains(@text, '说两句')]"
    # Canned comment text typed into the box during the test.
    comment_comment = "很好的移动医疗平台"
    # XPath locator for the "submit comment" button.
    submit_comment = "//android.widget.Button[contains(@text, '提交评论')]"
    # Type the canned comment and submit it.
    def comment_click(self):
        self.get_xpath(self.comment).send_keys(self.comment_comment)
        self.wait_for_xpath(self.submit_comment)
        self.get_xpath(self.submit_comment).click()
    # After returning from login, submit directly without retyping.
    def comment_submit_click(self):
        self.wait_for_xpath(self.submit_comment)
        self.get_xpath(self.submit_comment).click()
|
[
"35643856@qq.com"
] |
35643856@qq.com
|
9be5a3a01f914b8842b74aea6fd0a01f02607b5e
|
0032c98333ffc0efdb920ecca31ab224378880e5
|
/rpi-tutorial/RaspEasy1.py
|
c6acfd285bdcdc7fae9df32146d5ae3281c0daba
|
[] |
no_license
|
raspibrick/install
|
bd1c6f9a8cb524f2ab5a2c17ad8c5463b768dffa
|
96288d6ca21abd8fb993cc376e37c16473b54dd5
|
refs/heads/master
| 2021-01-10T05:00:39.159879
| 2019-07-25T09:46:04
| 2019-07-25T09:46:04
| 40,703,681
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# RaspEasy1.py
# Button press/release to switch on/off LED
# NOTE: Python 2 syntax (print statement); runs on a Raspberry Pi with RPi.GPIO.
import RPi.GPIO as GPIO
P_BUTTON = 12 # Button A
#P_BUTTON = 13 # Button B
P_LED = 7 # LED A
#P_LED = 11 # LED B
# Configure physical (BOARD) pin numbering: button as input, LED as output.
def setup():
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(P_BUTTON, GPIO.IN)
    GPIO.setup(P_LED, GPIO.OUT)
print "starting..."
setup()
# Busy-poll the button forever, mirroring its state onto the LED.
while True:
    if GPIO.input(P_BUTTON) == GPIO.HIGH:
        GPIO.output(P_LED, GPIO.HIGH)
    else:
        GPIO.output(P_LED, GPIO.LOW)
|
[
"a2015@pluess.name"
] |
a2015@pluess.name
|
47ae774d608c22a3626c9bee64788db4ff3229a6
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/v8/tools/release/mergeinfo.py
|
7136463afb265929947d5bc1429e69855cdc0582
|
[
"BSD-3-Clause",
"SunPro",
"Apache-2.0"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 5,496
|
py
|
#!/usr/bin/env python3
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import os
import sys
import re
from subprocess import Popen, PIPE
GIT_OPTION_HASH_ONLY = '--pretty=format:%H'
GIT_OPTION_NO_DIFF = '--quiet'
GIT_OPTION_ONELINE = '--oneline'
def git_execute(working_dir, args, verbose=False):
  """Run a git command in `working_dir` and return its decoded stdout.

  Raises Exception (with stderr) on a non-zero exit status.
  """
  command = ["git", "-C", working_dir] + args
  if verbose:
    print("Git working dir: " + working_dir)
    print("Executing git command:" + str(command))
  p = Popen(args=command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
  output, err = p.communicate()
  if p.returncode != 0:
    raise Exception(err)
  output = output.decode('utf-8')
  if verbose:
    print("Git return value: " + output)
  return output
def describe_commit(git_working_dir, hash_to_search, one_line=False):
  # `git show --quiet` prints the commit message without the diff;
  # --oneline condenses it to a single line.
  if one_line:
    return git_execute(git_working_dir, ['show',
                                         GIT_OPTION_NO_DIFF,
                                         GIT_OPTION_ONELINE,
                                         hash_to_search]).strip()
  return git_execute(git_working_dir, ['show',
                                       GIT_OPTION_NO_DIFF,
                                       hash_to_search]).strip()
def get_followup_commits(git_working_dir, hash_to_search):
  # Follow-ups/reverts/ports reference the original hash in their message.
  cmd = ['log', '--grep=' + hash_to_search, GIT_OPTION_HASH_ONLY,
         'remotes/origin/main'];
  return git_execute(git_working_dir, cmd).strip().splitlines()
def get_merge_commits(git_working_dir, hash_to_search):
  """Return merge commit hashes, excluding false 'Cr-Branched-From' matches."""
  merges = get_related_commits_not_on_main(git_working_dir, hash_to_search)
  false_merges = get_related_commits_not_on_main(
    git_working_dir, 'Cr-Branched-From: ' + hash_to_search)
  false_merges = set(false_merges)
  return ([merge_commit for merge_commit in merges
           if merge_commit not in false_merges])
def get_related_commits_not_on_main(git_working_dir, grep_command):
  # Search all refs except main for commits whose message matches the grep.
  commits = git_execute(git_working_dir, ['log',
                                          '--all',
                                          '--grep=' + grep_command,
                                          GIT_OPTION_ONELINE,
                                          '--decorate',
                                          '--not',
                                          'remotes/origin/main',
                                          GIT_OPTION_HASH_ONLY])
  return commits.splitlines()
def get_branches_for_commit(git_working_dir, hash_to_search):
  """Return the set of branch names (stripped) containing the commit."""
  branches = git_execute(git_working_dir, ['branch',
                                           '--contains',
                                           hash_to_search,
                                           '-a']).strip()
  branches = branches.splitlines()
  return {branch.strip() for branch in branches}
def is_lkgr(branches):
  """Return True when the commit is contained in the lkgr branch."""
  lkgr_branch = 'remotes/origin/lkgr'
  return lkgr_branch in branches
def get_first_canary(branches):
  """Return the earliest (sorted) Chromium canary branch containing the commit."""
  canaries = sorted(branch for branch in branches
                    if branch.startswith('remotes/origin/chromium/'))
  if not canaries:
    return 'No Canary coverage'
  return canaries[0].split('/')[-1]
def get_first_v8_version(branches):
  """Return the first V8 release branch (x.y.z) containing the commit, or '--'."""
  version_re = re.compile(r"remotes/origin/[0-9]+\.[0-9]+\.[0-9]+")
  matching = [branch for branch in branches if version_re.match(branch)]
  if not matching:
    return "--"
  return matching[0].split("/")[-1]
def print_analysis(git_working_dir, hash_to_search):
  """Print a full merge/revert/port analysis for `hash_to_search`."""
  print('1.) Searching for "' + hash_to_search + '"')
  print('=====================ORIGINAL COMMIT START===================')
  print(describe_commit(git_working_dir, hash_to_search))
  print('=====================ORIGINAL COMMIT END=====================')
  print('2.) General information:')
  branches = get_branches_for_commit(git_working_dir, hash_to_search)
  print('Is LKGR: ' + str(is_lkgr(branches)))
  print('Is on Canary: ' + str(get_first_canary(branches)))
  print('First V8 branch: ' + str(get_first_v8_version(branches)) + \
      ' (Might not be the rolled version)')
  print('3.) Found follow-up commits, reverts and ports:')
  followups = get_followup_commits(git_working_dir, hash_to_search)
  for followup in followups:
    print(describe_commit(git_working_dir, followup, True))
  print('4.) Found merges:')
  merges = get_merge_commits(git_working_dir, hash_to_search)
  for currentMerge in merges:
    print(describe_commit(git_working_dir, currentMerge, True))
    print('---Merged to:')
    # List the remote branches each merge commit landed on.
    mergeOutput = git_execute(git_working_dir, ['branch',
                                                '--contains',
                                                currentMerge,
                                                '-r']).strip()
    print(mergeOutput)
  print('Finished successfully')
if __name__ == '__main__':  # pragma: no cover
  parser = argparse.ArgumentParser('Tool to check where a git commit was'
                                   ' merged and reverted.')
  parser.add_argument('-g', '--git-dir', required=False, default='.',
                      help='The path to your git working directory.')
  parser.add_argument('hash',
                      nargs=1,
                      help='Hash of the commit to be searched.')
  args = sys.argv[1:]
  options = parser.parse_args(args)
  sys.exit(print_analysis(options.git_dir, options.hash[0]))
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
e64b04bed60680e2acb04477bef4c8a503e4a2f3
|
3d39974209f890080456c5f9e60397c505540c64
|
/0x0A-python-inheritance/5-main.py
|
df9dd05cb9641b418d71e98f33974e3e81993285
|
[] |
no_license
|
salmenz/holbertonschool-higher_level_programming
|
293ca44674833b587f1a3aec13896caec4e61ab6
|
23792f8539db48c8f8200a6cdaf9268d0cb7d4e6
|
refs/heads/master
| 2020-09-28T11:42:51.264437
| 2020-05-13T22:56:39
| 2020-05-13T22:56:39
| 226,771,568
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
#!/usr/bin/python3
# Demo script: instantiate BaseGeometry and inspect its attributes.
BaseGeometry = __import__('5-base_geometry').BaseGeometry
bg = BaseGeometry()
print(bg)  # default repr of the instance
print(dir(bg))  # attributes visible on the instance
print(dir(BaseGeometry))  # attributes visible on the class
|
[
"salmen.zooro@gmail.com"
] |
salmen.zooro@gmail.com
|
64c4c0966c54ffac9c7ef815fc3a30c92146fa0e
|
a606893da1e354c7c617d0c9247b23118be2813a
|
/动态规划/19.py
|
5b823571231090602205c302b2258b4f90e32ff4
|
[] |
no_license
|
lindo-zy/leetcode
|
4ce6cb9ded7eeea0a6953b6d8152b5a9657965da
|
f4277c11e620ddd748c2a2f3d9f5f05ee58e5716
|
refs/heads/master
| 2023-07-22T06:19:00.589026
| 2023-07-16T12:35:14
| 2023-07-16T12:35:14
| 229,958,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-


class Solution:
    """LeetCode 10: regular-expression matching supporting '.' and '*'."""

    def isMatch(self, s: str, p: str) -> bool:
        """Return True if pattern `p` fully matches string `s`.

        '.' matches any single character; 'x*' matches zero or more of 'x'.
        Classic O(len(s) * len(p)) dynamic programme where dp[i][j] means
        s[:i] matches p[:j].  (A large block of commented-out dead code was
        removed from this method.)
        """
        m, n = len(s) + 1, len(p) + 1
        dp = [[False] * n for _ in range(m)]
        dp[0][0] = True  # empty pattern matches empty string
        # First row: the empty string can only match patterns like a*, a*b*, ...
        for j in range(2, n, 2):
            dp[0][j] = dp[0][j - 2] and p[j - 1] == '*'
        # Fill the table row by row.
        for i in range(1, m):
            for j in range(1, n):
                if p[j - 1] == '*':
                    if dp[i][j - 2]:
                        dp[i][j] = True  # drop "x*" entirely (zero copies)
                    elif dp[i - 1][j] and s[i - 1] == p[j - 2]:
                        dp[i][j] = True  # "x*" absorbs one more matching char
                    elif dp[i - 1][j] and p[j - 2] == '.':
                        dp[i][j] = True  # ".*" absorbs any character
                else:
                    if dp[i - 1][j - 1] and s[i - 1] == p[j - 1]:
                        dp[i][j] = True  # literal character match
                    elif dp[i - 1][j - 1] and p[j - 1] == '.':
                        dp[i][j] = True  # '.' wildcard match
        return dp[-1][-1]


if __name__ == '__main__':
    solver = Solution()
    s = 'aab'
    p = 'c*a*b'
    print(solver.isMatch(s, p))
|
[
"492201845@qq.com"
] |
492201845@qq.com
|
9affb7daf8c9cbd553358fe630f1221b9be2311b
|
cb181d1bd709faff629203c057809615ef4cf02e
|
/chembl_extras/management/commands/generate_ora2pg_conf.py
|
75c1685a4614bc20938312e5beac34024625a3b1
|
[
"Apache-2.0"
] |
permissive
|
chembl/chembl_extras
|
78361e3e65c00a166aaf793fac2cdf105a021af0
|
ed4f4782d77b10f76984a7fbe18642cdb015c2de
|
refs/heads/master
| 2021-01-25T08:54:18.946778
| 2017-05-10T12:42:22
| 2017-05-10T12:42:22
| 27,765,269
| 0
| 1
| null | 2015-03-03T13:20:24
| 2014-12-09T12:20:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,784
|
py
|
__author__ = 'mnowotka'
import os
from django.core.management.base import BaseCommand
from optparse import make_option
from django.db import DEFAULT_DB_ALIAS
from django.conf import settings
from django import db
from django.db import connections
from collections import OrderedDict
from django.core.serializers import sort_dependencies
# ----------------------------------------------------------------------------------------------------------------------
class Command(BaseCommand):
    """Management command that prints an ora2pg configuration for an app.

    Reads connection details for the (Oracle) source database from Django
    settings and renders `confTemplate` with them.
    NOTE(review): this file uses Python 2 print statements throughout.
    """
    help = "Prepare configuration file for ora2pg tool."
    args = '[appname appname.ModelName ...]'
    # ora2pg config template; %s slots are filled in handle() in this order:
    # ORACLE_HOME, host, sid, port, user, password, schema, tables,
    # data limit, output location.
    confTemplate = '''
ORACLE_HOME %s
ORACLE_DSN dbi:Oracle:host=%s;sid=%s;port=%s
ORACLE_USER %s
ORACLE_PWD %s
SCHEMA %s
TABLES %s
USER_GRANTS 1
DEBUG 0
EXPORT_SCHEMA 0
COMPILE_SCHEMA 0
TYPE DATA
DATA_LIMIT %s
CASE_SENSITIVE 0
OUTPUT %s
DATA_TYPE DATE:date,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:integer,INTEGER:integer,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp
BZIP2 /bin/bzip2
GEN_USER_PWD 0
FKEY_DEFERRABLE 0
DEFER_FKEY 0
DROP_FKEY 0
DROP_INDEXES 0
PG_NUMERIC_TYPE 0
DEFAULT_NUMERIC NUMERIC
KEEP_PKEY_NAMES 1
DISABLE_TABLE_TRIGGERS 1
NOESCAPE 0
DISABLE_SEQUENCE 0
ORA_SENSITIVE 0
PLSQL_PGSQL 1
ORA_RESERVED_WORDS audit,comment
FILE_PER_CONSTRAINT 0
FILE_PER_INDEX 0
FILE_PER_TABLE 0
TRANSACTION serializable
PG_SUPPORTS_WHEN 1
PG_SUPPORTS_INSTEADOF 0
FILE_PER_FUNCTION 0
TRUNCATE_TABLE 0
FORCE_OWNER 0
STANDARD_CONFORMING_STRINGS 0
THREAD_COUNT 0
ALLOW_CODE_BREAK 1
XML_PRETTY 1
FDW_SERVER orcl
ENABLE_MICROSECOND 0
DISABLE_COMMENT 1
'''
# ----------------------------------------------------------------------------------------------------------------------
    def add_arguments(self, parser):
        """Register the command-line options for this command."""
        parser.add_argument('--database', dest='sourceDatabase', default=DEFAULT_DB_ALIAS, help='Source database')
        parser.add_argument('--dumpfile', dest='dumpfile', default=None, help='Location of dump file.')
        parser.add_argument('--datalimit', dest='dataLimit', default=10000, help='Data limit')
        parser.add_argument('--app', dest='app', default='chembl_migration_model', help='App to be exported')
# ----------------------------------------------------------------------------------------------------------------------
    def handle(self, *args, **options):
        """Render the ora2pg config for the selected app and source database."""
        from django.apps import apps
        # TODO : Check export mode
        db.reset_queries()
        sourceDatabase = options.get('sourceDatabase')
        dataLimit = options.get('dataLimit')
        app = apps.get_app(options.get('app'))
        con = connections[sourceDatabase]
        if con.vendor != 'oracle':
            print "Source database has to be oracle."
            return
        user = settings.DATABASES[sourceDatabase]['USER']
        passwd = settings.DATABASES[sourceDatabase]['PASSWORD']
        host = settings.DATABASES[sourceDatabase]['HOST']
        port = settings.DATABASES[sourceDatabase]['PORT']
        name = settings.DATABASES[sourceDatabase]['NAME']
        app_list = OrderedDict((app, None) for app in [app])
        tables = []
        # NOTE(review): `sorted` shadows the builtin here — it holds the
        # dependency-sorted model list, not the sorted() function.
        sorted = sort_dependencies(app_list.items())
        lastObjectName = sorted[-1].__name__
        filename = lastObjectName + ".postgresql_psycopg2.sql"
        chemblSQLPath = os.path.join(os.path.dirname(app.__file__),'sql', filename)
        location = chemblSQLPath
        oracleHome = os.environ['ORACLE_HOME']
        if options.get('dumpfile'):
            # A directory gets the default filename appended; a .sql path is
            # used as-is.
            if not options.get('dumpfile').endswith('.sql'):
                location = os.path.join(options.get('dumpfile'), filename)
            else:
                location = options.get('dumpfile')
        # Only managed models become TABLES entries in the config.
        for model in reversed(sorted):
            if not model._meta.managed:
                continue
            tables.append(model._meta.db_table)
        print self.confTemplate % (oracleHome, host, name, port, user, passwd, user, " ".join(tables), dataLimit, location)
        if location != chemblSQLPath:
            # Create an empty file at the custom location and symlink it into
            # the app's sql directory so downstream tooling finds it.
            print "different! location = " + location + ", chemblSQLPath = " + chemblSQLPath
            f = open(location, 'w')
            f.close()
            os.symlink(location, chemblSQLPath)
# ----------------------------------------------------------------------------------------------------------------------
|
[
"mnowotka@ebi.ac.uk"
] |
mnowotka@ebi.ac.uk
|
1f73255d352061d5d5de367ce1cde91ab143216a
|
373035950bdc8956cc0b74675aea2d1857263129
|
/spar_python/query_generation/generators/keyword_query_generator_test.py
|
1a3ad343f9a5a7f147d582faaaeaa4e2f1435ce0
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
limkokholefork/SPARTA
|
5d122cd2e920775d61a5404688aabbafa164f22e
|
6eeb28b2dd147088b6e851876b36eeba3e700f16
|
refs/heads/master
| 2021-11-11T21:09:38.366985
| 2017-06-02T16:21:48
| 2017-06-02T16:21:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,503
|
py
|
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: ATLH
# Description: Tests for equality_query_generator
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 6 August 2012 ATLH Original version
# *****************************************************************
from __future__ import division
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.join(this_dir, '..', '..', '..')
sys.path.append(base_dir)
import unittest
import time
import keyword_query_generator as kqg
import spar_python.common.spar_random as spar_random
import spar_python.common.distributions.text_generator as text_generator
import StringIO as s
import spar_python.query_generation.query_schema as qs
import spar_python.data_generation.spar_variables as sv
class KeywordQueryGeneratorTest(unittest.TestCase):
    """Unit tests for KeywordQueryGenerator using a small in-memory corpus."""
    def setUp(self):
        # Seed the RNG from wall-clock time and remember it so any failure can
        # be reproduced from the printed seed.
        self.seed = int(time.time())
        self.seed_msg = "Random seed used for this test: %s" % self.seed
        self.longMessage = True
        spar_random.seed(self.seed)
        #set up intitialization values
        sub_cat = 'word'
        # Source text for the keyword distribution (excerpt from
        # "The Call of the Wild").
        f = s.StringIO('''Buck had accepted the rope with quiet dignity. To be sure, it
unwonted performance: but he had learned to trust in men he knew, and to
give them credit for a wisdom that outreached his own. But when the ends
of the ropes were placed in the strangers hands, he growled menacingly.
He had merely intimated his displeasure, in his pride believing that to
intimate was to command. But to his surprise the rope tightened around
his neck, shutting off his breath. In quick rage he sprang at the man,
who met him halfway, grappled him close by the throat, and with a deft
twist threw him over on his back. Then the rope tightened mercilessly,
while Buck struggled in a fury, his tongue lolling out of his mouth and
his great chest. Never in all his life had he been so
vilely treated, and never in all his life had he been so angry. But his
strength ebbed, his eyes glazed, and he knew nothing when the train was
flagged and the two men threw him into the baggage car.''')
        self._kw_dist = text_generator.TextGenerator((f,))
        fields = [sv.VARS.NOTES3]
        dists = [self._kw_dist]
        # Each row: [no_queries, rss, keyword_len, type].
        other_fields = ['no_queries', 'rss','keyword_len','type']
        other_cols = [[3, 60, 4, 'word'], [3, 60, 5, 'word'],
                      [3, 75, 4, 'stem'], [3, 60, 5, 'stem']]
        self.generator = kqg.KeywordQueryGenerator('P3',sub_cat, ["LL"],dists, fields, 1000,
                                                   100, other_fields, other_cols)
    @unittest.skip("Sporadically fails, not sure why")
    def testGenerateQuery(self):
        """
        Tests equality query generator against a 'db' to make sure it is
        generating the right queries
        """
        #generate a 'db' to test against
        notes = [self._kw_dist.generate(125) for _ in xrange(1000)]
        #generate queries
        query_batches = self.generator.produce_query_batches()
        queries = []
        for query_batch in query_batches:
            queries += query_batch.produce_queries()
        #check to see right number of queries generated
        self.assertGreaterEqual(len(queries), 6, self.seed_msg)
        #check queries against 'db' to make sure they match within a factor
        #of two
        word = 0
        stem = 0
        working_queries = 0
        non_working_queries = []
        for q in queries:
            # Pick the membership predicate matching the query type.
            if q[qs.QRY_TYPE] == 'word':
                x = lambda generated_text: \
                      generated_text.contains_upper(q[qs.QRY_SEARCHFOR])
                word +=1
            elif q[qs.QRY_TYPE] == 'stem':
                x = lambda generated_text: \
                      generated_text.contains_stem(q[qs.QRY_SEARCHFOR])
                stem +=1
            count_match = len([note for note in notes if x(note)])
            msg = 'Query %d was: \n' \
                  'sub_cat: %s\n'\
                  'field: %s\n'\
                  'type: %s\n'\
                  'rss: %d\n'\
                  'value: %s\n' % (q[qs.QRY_QID], q[qs.QRY_SUBCAT],
                                   q[qs.QRY_FIELD], q[qs.QRY_TYPE],
                                   q[qs.QRY_RSS], q[qs.QRY_SEARCHFOR])
            # Accept a result-set size within a factor of four of the target.
            if count_match <= q[qs.QRY_URSS]*4 and count_match >= q[qs.QRY_LRSS]/4:
                working_queries+=1
            else:
                non_working_queries.append(msg)
        fail_msg = ''
        for msg in non_working_queries[:3]:
            fail_msg += msg
        self.assertGreaterEqual(working_queries, 6, fail_msg)
        #check to see each field had the correct number of queries
        #ideally this number would be greater than 6 (the requested amount)
        #but because the distribution used for unit testing is so small
        #there is a greater margin of error at this scale
        self.assertGreaterEqual(word, 3, self.seed_msg)
        self.assertGreaterEqual(stem, 3, self.seed_msg)
|
[
"mitchelljd@ll.mit.edu"
] |
mitchelljd@ll.mit.edu
|
32015bbff11b11145a125a55d2a4a1aa07262ac3
|
f9b30e3406d23569c5b6dd4a778454683a72744b
|
/editor/views/timeline.py
|
a959317d8333fd9cf176e1430b9df22b9978ac13
|
[
"CC-BY-SA-3.0",
"Apache-2.0"
] |
permissive
|
numbas/editor
|
65b0644f28192180b83ab18a9ed09886b4c0ce6b
|
c11a5ae11f013d63114535a8f0b0f3ec635c8bd5
|
refs/heads/master
| 2023-08-17T07:34:00.283142
| 2023-08-16T13:44:54
| 2023-08-16T13:44:54
| 3,493,021
| 65
| 83
|
Apache-2.0
| 2023-07-19T08:27:36
| 2012-02-20T11:20:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,253
|
py
|
import json
from editor.views.generic import user_json, stamp_json, comment_json
from editor.models import TimelineItem
from django.views import generic
from django import http
from django.urls import reverse
# Maps TimelineItem.type to the serialiser for that event's payload.
event_json_views = {
    'stamp': stamp_json,
    'comment': comment_json,
}
def event_json(event, viewed_by):
    """Serialise one timeline event into a JSON-ready dict for `viewed_by`.

    Raises Exception when the event type has no registered serialiser.
    """
    formatted_date = event.date.strftime('%Y-%m-%d %H:%M:%S')
    author = user_json(event.user)
    if event.type not in event_json_views:
        raise Exception("Unrecognised event type %s" % event.type)
    payload = event_json_views[event.type](event.data, viewed_by=viewed_by)
    return {
        'date': formatted_date,
        'type': event.type,
        'data': payload,
        'user': author,
    }
def timeline_json(events, viewed_by):
    """Serialise an iterable of timeline events, preserving their order."""
    return [event_json(item, viewed_by) for item in events]
class DeleteTimelineItemView(generic.DeleteView):
    """Delete a timeline item, subject to a per-user permission check."""
    model = TimelineItem
    # Both entry points funnel into try_delete(); delete() covers the
    # older DeleteView dispatch path and form_valid() the newer one —
    # NOTE(review): presumably to support multiple Django versions, confirm.
    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        return self.try_delete()
    def form_valid(self, form):
        return self.try_delete()
    def try_delete(self):
        # Permission is decided by the model, not the view.
        if self.object.can_be_deleted_by(self.request.user):
            self.object.delete()
            return http.HttpResponse('timeline item {} deleted'.format(self.object.pk))
        else:
            return http.HttpResponseForbidden('You don\'t have the necessary access rights.')
class HideTimelineItemView(generic.UpdateView):
    """Hide a timeline item for the requesting user only (adds them to
    ``hidden_by``) and return a JSON payload with an undo URL."""
    model = TimelineItem
    fields = []
    # POST-only endpoint (plus the usual safe methods).
    http_method_names = ['post', 'head', 'options', 'trace']
    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        self.object.hidden_by.add(self.request.user)
        data = {
            'success': True,
            # URL the client can call to reverse the hide.
            'undo': reverse('timelineitem_unhide', args=(self.object.pk,))
        }
        return http.HttpResponse(json.dumps(data), content_type='application/json')
class UnhideTimelineItemView(generic.UpdateView):
    """Make a timeline item visible again for the requesting user."""
    model = TimelineItem

    def post(self, request, *args, **kwargs):
        item = self.get_object()
        self.object = item
        item.hidden_by.remove(self.request.user)
        payload = json.dumps({
            'success': True,
        })
        return http.HttpResponse(payload, content_type='application/json')
|
[
"christianperfect@gmail.com"
] |
christianperfect@gmail.com
|
926176b80e39090a452bfcef204049145d25a362
|
68f757e7be32235c73e316888ee65a41c48ecd4e
|
/python_book(이것이 코딩테스트다)/조합문제 예시 p486.py
|
3e5176b472357fc8ad7125c6f28c9d12695f6b86
|
[] |
no_license
|
leejongcheal/algorithm_python
|
b346fcdbe9b1fdee33f689477f983a63cf1557dc
|
f5d9bc468cab8de07b9853c97c3db983e6965d8f
|
refs/heads/master
| 2022-03-05T20:16:21.437936
| 2022-03-03T01:28:36
| 2022-03-03T01:28:36
| 246,039,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
"""조합 문제 예시
L개의 비번갯수와 C개의 알파벳 입력받음
최소 모음 1개이상자음 2개이상으로 정렬순으로 L개의 암호를 가진것을 알파벳순으로 출력
combinations의 기본
- 반환값 : 특정 주소를 가지는 조합타입 [(),(),()..]식으로 반환
- 원소값 : 튜플형식을 가짐
반환값에 대해서 리스트형으로 형변환과 원소값도 리스트로 형변환 하는게 편하다.
문자열에대한 조합 사용 예시
2개로 나누어서 풀었는데 그냥 다 뽑은 다음에 모음과 자음 갯수를 검사해서 출력하는식으로 푸는게 훨씬 편했을듯
"""
import sys, itertools
input = sys.stdin.readline
L, C = map(int, input().rstrip().split())
result = []
alpa = list(input().rstrip().split())
mo = []
ja = []
for i in alpa:
if i in "aeoui":
mo.append(i)
else:
ja.append(i)
mo.sort()
for i in range(1, len(mo) + 1):
if L - i < 2:
break
mo_result = list(itertools.combinations(mo, i))
ja_result = list(itertools.combinations(ja, L - i))
for mo_data in mo_result:
for ja_data in ja_result:
temp = ""
temp = list(mo_data) + list(ja_data)
result.append(sorted(temp))
result.sort()
for r in result:
print("".join(r))
|
[
"aksndk123@naver.com"
] |
aksndk123@naver.com
|
8f482032abe72bd653d2038e495eca19f4fa7f93
|
89207f4e5c5a8fd955adf775a553c32359a0cae8
|
/test.py
|
84b1107cbbfbcc2f81877535be972409c2ed3e10
|
[
"BSD-4-Clause"
] |
permissive
|
ikbear/seven-cow
|
065161f811c465f0ce1579471bf9a0ba4fc1105d
|
4a6fc7392b2feddf67d7f338794758bdc19379a9
|
refs/heads/master
| 2021-01-16T17:50:41.438442
| 2013-06-27T09:25:16
| 2013-06-27T09:25:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,586
|
py
|
import os
from sevencow import Cow
class Test(object):
    """Integration tests for sevencow's bucket API against a live Qiniu account.

    Credentials and the bucket name come from environment variables and every
    test talks to the real service.  Method names are prefixed a-j so runners
    that sort tests alphabetically execute them in dependency order
    (put before stat/copy/move/delete).
    """
    def setUp(self):
        ACCESS_KEY = os.environ['QINIU_ACCESS_KEY']
        SECRET_KEY = os.environ['QINIU_SECRET_KEY']
        bucket = os.environ['QINIU_BUCKET']
        cow = Cow(ACCESS_KEY, SECRET_KEY)
        self.b = cow.get_bucket(bucket)
        # Create three small local fixture files: sevencow0..sevencow2.
        for i in range(3):
            with open('sevencow{0}'.format(i), 'w') as f:
                f.write('0000')
    def tearDown(self):
        # Best-effort removal of the local fixture files.
        for f in self._multi_files():
            try:
                os.unlink(f)
            except IOError:
                pass
    def _list_file_names(self):
        # Names of all files currently in the remote bucket.
        files = self.b.list_files()
        return [f['key'] for f in files['items']]
    def _multi_files(self):
        return ['sevencow{0}'.format(i) for i in range(3)]
    def testaPutSingle(self):
        key = 'sevencow0'
        res = self.b.put(key)
        assert key == res['key']
        assert key in self._list_file_names()
    def testbPutMulti(self):
        keys = self._multi_files()
        res = self.b.put(*keys)
        res_keys = [r['key'] for r in res]
        assert keys == res_keys
        files = self._list_file_names()
        for k in keys:
            assert k in files
    def testcStatSingle(self):
        self.b.stat('sevencow0')
    def testdStatMulti(self):
        self.b.stat(*self._multi_files())
    def testeCopySingle(self):
        self.b.copy('sevencow0', 'sevencow01')
        assert 'sevencow01' in self._list_file_names()
    def testfCopyMulti(self):
        self.b.copy(('sevencow1', 'sevencow11'), ('sevencow2', 'sevencow21'))
        files = self._list_file_names()
        assert 'sevencow11' in files
        assert 'sevencow21' in files
    def testgMoveSingle(self):
        self.b.move('sevencow01', 'sevencow011')
        files = self._list_file_names()
        assert 'sevencow01' not in files
        assert 'sevencow011' in files
    def testhMoveMulti(self):
        self.b.move(('sevencow11', 'sevencow111'), ('sevencow21', 'sevencow211'))
        files = self._list_file_names()
        assert 'sevencow11' not in files and 'sevencow21' not in files
        assert 'sevencow111' in files and 'sevencow211' in files
    def testiDeleteSingle(self):
        self.b.delete('sevencow0')
        assert 'sevencow0' not in self._list_file_names()
    def testjDeleteMulti(self):
        # Remove every remote object created by the earlier tests.
        keys = ['sevencow1', 'sevencow2', 'sevencow011', 'sevencow111', 'sevencow211']
        self.b.delete(*keys)
        files = self._list_file_names()
        for k in keys:
            assert k not in files
|
[
"yueyoum@gmail.com"
] |
yueyoum@gmail.com
|
41a41fbbd319825b3648367519e4f9dd507552ae
|
27aa2aa55c4c03b032b62c462e98385e011ec923
|
/5_5_1_Hash_Functions.py
|
9d0b25bfbc81006f485cc366cbc720e19fc89387
|
[] |
no_license
|
huiyanglu/DataStructures
|
bb08f07ded680f5c02d7264123b7b48cab41a223
|
76c6dee95c747729a19b7f910c9f344d25e4bab0
|
refs/heads/master
| 2020-04-02T04:49:23.676385
| 2019-08-13T15:37:23
| 2019-08-13T15:37:23
| 154,036,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
"""
create hash functions for character-based items such as strings.
"""
def hash(astring, tablesize):
    """Map *astring* to a slot in [0, tablesize) by summing the ordinal
    values of its characters.

    Anagrams collide ('cat' and 'act' hash identically) because character
    position is ignored; see hashwithWeight for a positional variant.
    The function name shadows the builtin ``hash`` — kept unchanged to
    preserve the exercise's public interface.
    """
    # Idiom fix: the original accumulated into a local named ``sum``,
    # shadowing the builtin; use the builtin sum() over a generator instead.
    return sum(ord(ch) for ch in astring) % tablesize
"""
When using this hash function, anagrams will always be given the same hash value.
To remedy this, we could use the position of the character as a weight.
One possible way to use the positional value as a weighting factor.
"""
def hashwithWeight(astring, tablesize):
    """Position-weighted hash: the character at index i contributes
    ord(char) * (i + 1), so anagrams no longer share a hash value.
    """
    # Idiom fix: enumerate + builtin sum() instead of an index loop
    # accumulating into a local named ``sum`` (which shadowed the builtin).
    total = sum(ord(ch) * (pos + 1) for pos, ch in enumerate(astring))
    return total % tablesize
print(hashwithWeight('cat',11))
|
[
"luchocoice@gmail.com"
] |
luchocoice@gmail.com
|
8fa00d7a6d2ce243aade814d74998d95242efa9e
|
18e2f67599b45b98c14931a8287a15b963250c83
|
/23_mtPool.py
|
fe2e58291b7370c831f0b2aec7e444b631bd39c9
|
[] |
no_license
|
uuboyscy/eb102-python
|
77ffb9a79a05d8371a5f38463c60ce579cbd9b39
|
ec0e32cb1b383b9ad0c0eb68696a15569bf6037d
|
refs/heads/master
| 2022-09-09T05:16:31.749893
| 2020-05-18T03:47:17
| 2020-05-18T03:47:17
| 262,553,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
# from multiprocessing import Pool
# import multiprocessing as mp
from multiprocessing.dummy import Pool
import time
import os
def longTimeTask(i):
    """Demo worker: print the task id and PID, sleep 5 seconds, then
    return 10**30.

    Because the active import is multiprocessing.dummy, the Pool is
    thread-based, so every task prints the same PID as the parent.
    """
    print('task: {}, PID: {}'.format(i, os.getpid()))
    time.sleep(5)
    result = 10 ** 30
    print('result: ', result)
    return result
if __name__ == '__main__':
    start_time = time.time()
    # "母程序PID" = parent process PID; with the thread-based dummy Pool it
    # matches the PIDs printed inside longTimeTask.
    print('母程序PID:', os.getpid())
    p = Pool(4)
    # map() blocks until all four tasks finish; the 5 s sleeps overlap,
    # so total wall time is roughly 5 s rather than 20 s.
    data = p.map(longTimeTask, iterable=range(0, 4))
    p.close()
    p.join()
    print(data)
    end_time = time.time()
    print(end_time - start_time)
|
[
"aegis12321@gmail.com"
] |
aegis12321@gmail.com
|
85e4609531f380ca3101cd87f99fb0acd6f0d120
|
7463a66dfa00572f4e4d8ef4349309531f0105ae
|
/TrainerDL/Utils/PytorchToCaffe/Caffe/caffe_lmdb.py
|
b4d538d0ba5c54137d4889ea9bdce54292bb09a6
|
[
"MIT"
] |
permissive
|
fx19940824/DetectionModel
|
f2e380fd21f4b31a17fd175a6dea1067b8f0d5cc
|
edc0d2f9eea481d2bc6f3abb2f222b59fdc25538
|
refs/heads/master
| 2022-12-20T19:58:32.224829
| 2019-05-30T01:16:05
| 2019-05-30T01:16:05
| 188,800,679
| 2
| 0
| null | 2022-11-22T02:39:23
| 2019-05-27T08:13:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import lmdb
from Utils.PytorchToCaffe.Caffe import caffe_pb2 as pb2
import numpy as np
class Read_Caffe_LMDB():
    """Iterate over a Caffe LMDB database, yielding (data, label) pairs.

    Each LMDB value is a serialized caffe Datum protobuf; ``to_numpy``
    decodes it into a (channels, height, width) ndarray plus its integer
    label.
    """
    def __init__(self, path, dtype=np.uint8):
        self.env = lmdb.open(path, readonly=True)
        self.dtype = dtype
        self.txn = self.env.begin()
        self.cursor = self.txn.cursor()

    @staticmethod
    def to_numpy(value, dtype=np.uint8):
        """Decode one serialized Datum into a CHW array and its label."""
        datum = pb2.Datum()
        datum.ParseFromString(value)
        # NOTE(review): np.fromstring is deprecated in favour of
        # np.frombuffer; kept because frombuffer returns a read-only view
        # and downstream code may rely on a writable copy — TODO confirm.
        flat_x = np.fromstring(datum.data, dtype=dtype)
        data = flat_x.reshape(datum.channels, datum.height, datum.width)
        # Bug fix: the original wrote ``label=flat_x = datum.label``, which
        # also rebound flat_x to the integer label for no reason.
        label = datum.label
        return data, label

    def iterator(self):
        """Yield (data, label) for every record from the cursor position."""
        while True:
            key, value = self.cursor.key(), self.cursor.value()
            yield self.to_numpy(value, self.dtype)
            if not self.cursor.next():
                return

    def __iter__(self):
        # Restart from the first record on every fresh iteration.
        self.cursor.first()
        it = self.iterator()
        return it

    def __len__(self):
        return int(self.env.stat()['entries'])
|
[
"you@example.com"
] |
you@example.com
|
4001aed2525fab6e77d25b475686406794abbccd
|
5004bd99b73653d6288122f38f5a58b4550ac55c
|
/setup.py
|
9df34b00cc918fa68ec26797d512db67fdea0c2f
|
[] |
no_license
|
BrancoLab/Fiberphotometry
|
78e2abb2149360393e4d718af908a6ee1351949e
|
d4440c1a6d343bd0d55f43f70a2f59bffa19c7c8
|
refs/heads/master
| 2023-01-01T09:37:02.908497
| 2020-10-28T15:46:07
| 2020-10-28T15:46:07
| 222,673,325
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
from setuptools import setup, find_namespace_packages

# Runtime dependencies. Two distribution names were wrong in the original
# list and would make `pip install` fail (or pull a deprecated stub):
#   "python-opencv" -> the real PyPI name is "opencv-python"
#   "sklearn"       -> deprecated alias; the real PyPI name is "scikit-learn"
requirements = [
    "numpy",
    "pandas",
    "moviepy",
    "tqdm",
    "opencv-python",
    "matplotlib",
    "seaborn",
    "scikit-learn",
    "scipy",
    "psychopy",
    "pypylon",
    "fancylog",
]

setup(
    name="fiberphotometry",
    version="0.0.0.1",
    author_email="federicoclaudi@protonmail.com",
    description="bunch of utility functions to analyse fiberphotometry data",
    packages=find_namespace_packages(exclude=()),
    include_package_data=True,
    install_requires=requirements,
    url="https://github.com/BrancoLab/Fiberphotometry",
    author="Federico Claudi, Yu Lin Tan",
    zip_safe=False,
)
|
[
"federicoclaudi@protonmail.com"
] |
federicoclaudi@protonmail.com
|
eb50e7b8c14f42d9fcd0d1cde2a5ef4b1a278281
|
c8f4731bf85003b7d9f5a908723d15a33415eea5
|
/caffe2/python/layers/pairwise_dot_product.py
|
1c6820c9f9ee001e34dd652a4739c342bd27d27f
|
[
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
ChengduoZhao/caffe2
|
5c90fd66374f7f00b1330831e23dc9915da5028c
|
7811796ac91f5457208cb20bcfc55de2d39d21ba
|
refs/heads/master
| 2021-01-01T17:11:43.297274
| 2017-07-27T04:52:18
| 2017-07-27T05:08:00
| 98,018,932
| 0
| 1
| null | 2017-07-22T08:44:58
| 2017-07-22T08:44:58
| null |
UTF-8
|
Python
| false
| false
| 2,244
|
py
|
## @package dot_product
# Module caffe2.python.layers.dot_product
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class PairwiseDotProduct(ModelLayer):
    """Layer computing pairwise dot products between all embeddings.

    Produces X @ X^T via BatchMatMul over the ``all_embeddings`` field and
    flattens the result; when an optional ``indices_to_gather`` field is
    present, only those entries of the flattened product are emitted
    (selected column-wise via a transpose/Gather round trip so that dense
    gradients can flow).
    """
    def __init__(self, model, input_record, output_dim,
                 name='pairwise_dot_product', **kwargs):
        super(PairwiseDotProduct, self).__init__(model, name, input_record, **kwargs)
        # Validate the input schema up front; messages keep the original
        # (misspelled) wording since they are runtime strings.
        assert isinstance(input_record, schema.Struct), (
            "Incorrect input type. Excpected Struct, but received: {0}".
            format(input_record))
        assert 'all_embeddings' in input_record, "all_embeddings is not given."
        all_embeddings = input_record['all_embeddings']
        assert isinstance(all_embeddings, schema.Scalar), (
            "Incorrect input type. Excpected Scalar, but received: {0}".
            format(all_embeddings))
        # indices_to_gather is optional; when absent the full flattened
        # product is returned by add_ops.
        if 'indices_to_gather' in input_record:
            indices_to_gather = input_record['indices_to_gather']
            assert isinstance(indices_to_gather, schema.Scalar), (
                "Incorrect type of indices_to_gather. "
                "Expected Scalar, but received: {0}".format(indices_to_gather)
            )
            self.indices_to_gather = indices_to_gather
        self.all_embeddings = all_embeddings
        dtype = all_embeddings.field_types()[0].base
        self.output_schema = schema.Scalar(
            (dtype, (output_dim)),
            model.net.NextScopedBlob(name + '_output')
        )
    def add_ops(self, net):
        # Y = embeddings x embeddings^T (batched).
        Y = net.BatchMatMul(
            [self.all_embeddings(), self.all_embeddings()],
            trans_b=1,
        )
        if self.indices_to_gather:
            # Gather selected entries of the flattened product; the
            # transpose pair makes Gather operate along the right axis.
            flattened = net.Flatten(Y, 1)
            transposed = net.Transpose(flattened)
            gathered = net.Gather(
                [
                    transposed,
                    self.indices_to_gather(),
                ],
                dense_gradient=True,
            )
            net.Transpose(gathered, self.output_schema())
        else:
            net.Flatten(Y, self.output_schema())
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
43c48db2124ce6475b7e25ca5a6994119a4adfc0
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/705. Design HashSet/solution1.py
|
2911a94a76a9164618872d1ff81eb7f0dd2f38a2
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589
| 2021-02-05T13:36:50
| 2021-02-05T13:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
class MyHashSet:
    """Integer hash set backed by separate chaining (a list per bucket)."""

    def __init__(self):
        """Initialize your data structure here."""
        self.base = 2003
        self.nums = [[] for _ in range(self.base + 1)]

    def hash(self, val: int) -> int:
        """Bucket index for *val*."""
        return val % self.base

    def add(self, val: int) -> None:
        bucket = self.nums[self.hash(val)]
        if val not in bucket:
            bucket.append(val)

    def remove(self, val: int) -> None:
        bucket = self.nums[self.hash(val)]
        if val in bucket:
            bucket.remove(val)

    def contains(self, val: int) -> bool:
        """
        Returns true if this set contains the specified element
        """
        return val in self.nums[self.hash(val)]
# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
|
[
"info@crazysquirrel.ru"
] |
info@crazysquirrel.ru
|
823d20f448832d54b724475aa2a27e940270962e
|
e3fc83e77e218f7b8df4b14b0753fd65afd4b923
|
/downloaded_kernels/loan_data/parsed_kernels/kernel_145.py
|
0662aad9202d5179619ffc67e902f318905da38c
|
[
"MIT"
] |
permissive
|
jupste/wranglesearch
|
982684fdaa7914af59758880fdc3a4ff3346477f
|
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
|
refs/heads/master
| 2023-06-18T04:46:34.474046
| 2021-07-15T23:43:24
| 2021-07-15T23:43:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
# Kaggle-kernel boilerplate: lists the competition's input files.
# NOTE(review): assumes a "../input" directory exists (Kaggle runtime only).
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
|
[
"jcamsan@mit.edu"
] |
jcamsan@mit.edu
|
a9a19434ebb517c7a921ab19b578cb91f3b4122c
|
842e3cd1266d18752a3baf2b90232ed4ce41eb4f
|
/grako/_config.py
|
03667f3d52c03b56dfbf518acb6fd9cceeda8d79
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
apalala/grako
|
2786d85eef9799bf614c46c92f19ff183a435d46
|
efb373d89e6805930e661758c2cff2b26da4658a
|
refs/heads/master
| 2020-12-25T17:37:05.353167
| 2017-05-02T02:53:11
| 2017-05-02T02:53:11
| 65,163,853
| 16
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Juancarlo Añez
# Copyright (C) 2012-2016 by Juancarlo Añez and Thomas Bragg
# Distribution name and release version for the grako package.
__toolname__ = 'Grako'
__version__ = '3.22.1'
|
[
"apalala@gmail.com"
] |
apalala@gmail.com
|
2b230404d0e84a18fc16f8ce7256f407c6c35f18
|
2c9eadb22d2de54ac06c6731664ed65276fd7062
|
/pipeline/contrib/periodic_task/tasks.py
|
f7bd50ffbcc0f37ed5abc9245031aeceea715a5d
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
TencentBlueKing/bk-nodeman
|
34bba9b217d84bb4dad27e735c29361a5c62789b
|
72d2104783443bff26c752c5bd934a013b302b6d
|
refs/heads/v2.4.x
| 2023-08-19T01:27:58.805715
| 2023-08-10T02:59:31
| 2023-08-10T02:59:31
| 385,203,367
| 54
| 49
|
MIT
| 2023-09-14T06:51:33
| 2021-07-12T10:05:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,327
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import datetime
import logging
import traceback
import pytz
from celery import task
from django.utils import timezone
from pipeline.contrib.periodic_task import signals
from pipeline.contrib.periodic_task.models import PeriodicTask, PeriodicTaskHistory
from pipeline.engine.models import FunctionSwitch
from pipeline.models import PipelineInstance
logger = logging.getLogger("celery")
@task(ignore_result=True)
def periodic_task_start(*args, **kwargs):
    """Celery entry point: run one scheduled execution of a periodic task.

    Looks up the PeriodicTask by ``kwargs['period_task_id']``, creates a
    PipelineInstance from its template, starts it, and records the outcome
    (success or failure reason) in PeriodicTaskHistory.  Never raises.
    """
    try:
        periodic_task = PeriodicTask.objects.get(id=kwargs["period_task_id"])
    except PeriodicTask.DoesNotExist:
        # task has been deleted
        return
    # Refuse to start anything while the engine is administratively frozen.
    if FunctionSwitch.objects.is_frozen():
        PeriodicTaskHistory.objects.record_schedule(
            periodic_task=periodic_task,
            pipeline_instance=None,
            ex_data="engine is frozen, can not start task",
            start_success=False,
        )
        return
    try:
        # Timestamp in the crontab's own timezone, used to build a unique
        # instance name (template name truncated to 113 chars so the
        # "_YYYYmmddHHMMSS" suffix fits).
        tz = periodic_task.celery_task.crontab.timezone
        now = datetime.datetime.now(tz=pytz.utc).astimezone(tz)
        instance = PipelineInstance.objects.create_instance(
            template=periodic_task.template,
            exec_data=periodic_task.execution_data,
            spread=kwargs.get("spread", True),
            name="{}_{}".format(periodic_task.name[:113], now.strftime("%Y%m%d%H%M%S")),
            creator=periodic_task.creator,
            description="periodic task instance",
        )
        signals.pre_periodic_task_start.send(
            sender=PeriodicTask, periodic_task=periodic_task, pipeline_instance=instance
        )
        result = instance.start(
            periodic_task.creator, check_workers=False, priority=periodic_task.priority, queue=periodic_task.queue
        )
    except Exception:
        # Any failure while creating/starting the instance is logged and
        # recorded as an unsuccessful schedule; the task itself never raises.
        et = traceback.format_exc()
        logger.error(et)
        PeriodicTaskHistory.objects.record_schedule(
            periodic_task=periodic_task, pipeline_instance=None, ex_data=et, start_success=False
        )
        return
    if not result.result:
        PeriodicTaskHistory.objects.record_schedule(
            periodic_task=periodic_task, pipeline_instance=None, ex_data=result.message, start_success=False
        )
        return
    # Bookkeeping only after a confirmed successful start.
    periodic_task.total_run_count += 1
    periodic_task.last_run_at = timezone.now()
    periodic_task.save()
    signals.post_periodic_task_start.send(sender=PeriodicTask, periodic_task=periodic_task, pipeline_instance=instance)
    PeriodicTaskHistory.objects.record_schedule(periodic_task=periodic_task, pipeline_instance=instance, ex_data="")
|
[
"durantzhang@tencent.com"
] |
durantzhang@tencent.com
|
aea944b0b2ea135e01e165057d5e429f2000308a
|
6b78bd7f62f7f407bf11d877cc4d91e7db3b62fe
|
/csc/python/Intro-Python-I/src/13_file_io.py
|
0bf87c9f36afd02ad1c53557568a38d023ac4bce
|
[] |
no_license
|
PascalUlor/code-challenges
|
b85efacd4bc5999a0748d1fa1e84f503be09dc94
|
6488d0a6d2729bd50b106573f16488479fd6e264
|
refs/heads/master
| 2023-03-03T17:50:18.413127
| 2023-02-21T13:10:02
| 2023-02-21T13:10:02
| 212,979,719
| 1
| 0
| null | 2023-02-15T22:59:13
| 2019-10-05T10:14:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
"""
Python makes performing file I/O simple. Take a look
at how to read and write to files here:
https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files
"""
import os
cwd = os.getcwd() # Get the current working directory (cwd)
files = os.listdir(cwd) # Get all the files in that directory
print("Files in %r: %s" % (cwd, files))
# NOTE(review): assumes the script is launched from the directory that
# contains 'src'; otherwise this raises FileNotFoundError.
os.chdir(r'src')
# Open up the "foo.txt" file (which already exists) for reading
# Print all the contents of the file, then close the file
# YOUR CODE HERE
def open_file(data):
    """Print the full contents of the text file at path *data*.

    The ``with`` block closes the file automatically; the original's
    trailing ``doc.closed`` was a no-op attribute access and is removed.
    """
    with open(data, 'r') as doc:
        content = doc.read()
        print(content)
open_file('foo.txt')
# Open up a file called "bar.txt" (which doesn't exist yet) for
# writing. Write three lines of arbitrary content to that file,
# then close the file. Open up "bar.txt" and inspect it to make
# sure that it contains what you expect it to contain
# YOUR CODE HERE
def write_file():
    """Write six fruit names, one per line, to 'pascal.txt' in the cwd.

    Fixes: renamed the ambiguous variable ``l``; replaced the manual write
    loop with ``writelines``; removed the no-op ``doc.closed`` (the
    ``with`` block already closes the file).
    """
    fruits = ['pear\n', 'apple\n', 'orange\n',
              'mandarin\n', 'watermelon\n', 'pomegranate\n']
    with open('pascal.txt', 'w') as doc:
        doc.writelines(fruits)
write_file()
open_file('pascal.txt')  # verify: print back what was just written
|
[
"pascalulor@yahoo.com"
] |
pascalulor@yahoo.com
|
29058817ee9d433c087476175de412e5db922af4
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/tests/components/rituals_perfume_genie/test_config_flow.py
|
3582f49598cb373ccfa3c0a6a1533dd8f2b49cc5
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931
| 2022-11-09T00:27:20
| 2022-11-09T00:27:20
| 123,929,062
| 5
| 4
|
Apache-2.0
| 2023-02-22T06:14:31
| 2018-03-05T14:11:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,789
|
py
|
"""Test the Rituals Perfume Genie config flow."""
from http import HTTPStatus
from unittest.mock import AsyncMock, MagicMock, patch
from aiohttp import ClientResponseError
from pyrituals import AuthenticationException
from homeassistant import config_entries
from homeassistant.components.rituals_perfume_genie.const import ACCOUNT_HASH, DOMAIN
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
TEST_EMAIL = "rituals@example.com"
VALID_PASSWORD = "passw0rd"
WRONG_PASSWORD = "wrong-passw0rd"
def _mock_account(*_):
    """Return a MagicMock that mimics an authenticated pyrituals Account.

    Accepts and ignores any positional arguments so it can serve as a
    ``side_effect`` for the patched Account constructor.
    """
    fake = MagicMock()
    fake.authenticate = AsyncMock()
    fake.account_hash = "any"
    fake.email = TEST_EMAIL
    return fake
async def test_form(hass):
    """Test we get the form."""
    # Happy path: the patched Account authenticates, a config entry is
    # created, and async_setup_entry runs exactly once.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] is None
    with patch(
        "homeassistant.components.rituals_perfume_genie.config_flow.Account",
        side_effect=_mock_account,
    ), patch(
        "homeassistant.components.rituals_perfume_genie.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_EMAIL: TEST_EMAIL,
                CONF_PASSWORD: VALID_PASSWORD,
            },
        )
        await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["title"] == TEST_EMAIL
    assert isinstance(result2["data"][ACCOUNT_HASH], str)
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    # AuthenticationException from authenticate() maps to "invalid_auth".
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch(
        "homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate",
        side_effect=AuthenticationException,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_EMAIL: TEST_EMAIL,
                CONF_PASSWORD: WRONG_PASSWORD,
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_auth_exception(hass):
    """Test we handle auth exception."""
    # Any unexpected exception maps to the generic "unknown" error.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch(
        "homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate",
        side_effect=Exception,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_EMAIL: TEST_EMAIL,
                CONF_PASSWORD: VALID_PASSWORD,
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error."""
    # An HTTP-level failure (500) maps to "cannot_connect".
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch(
        "homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate",
        side_effect=ClientResponseError(
            None, None, status=HTTPStatus.INTERNAL_SERVER_ERROR
        ),
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_EMAIL: TEST_EMAIL,
                CONF_PASSWORD: VALID_PASSWORD,
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
|
[
"noreply@github.com"
] |
Adminiuga.noreply@github.com
|
9788a113b67bc1688dfc6029b4307f0c4f4a0ae5
|
d0758e0ca004226cec8ad8b26c9565c98534a8b8
|
/10-iot/A08_Timers/main.py
|
fb8ca3b0af0f866b91b8576dbba52d491a05c9ca
|
[] |
no_license
|
pythoncanarias/eoi
|
334d64a96afc76ac1fa10282378f291b6d8c94b3
|
349367254f85e3e4273cede067ca950913a1332c
|
refs/heads/master
| 2023-07-06T08:00:11.366345
| 2023-06-30T15:19:33
| 2023-06-30T15:19:33
| 222,742,870
| 26
| 19
| null | 2023-06-25T16:03:46
| 2019-11-19T16:41:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 552
|
py
|
# MicroPython (ESP32) demo — requires the machine/utime modules.
from machine import Timer
import utime
# Created by Daniel Alvarez (danidask@gmail.com) for the EOI Python course (eoi.es)
tim1 = Timer(-1)  # -1 requests a virtual timer (RTOS-based)
tim1.init(period=2500, mode=Timer.ONE_SHOT, callback=lambda x:print("#### esto solo se ejecuta una vez"))
tim2 = Timer(-1)
tim2.init(period=1000, mode=Timer.PERIODIC, callback=lambda x:print("esto se ejecutara periodicamente"))
print("inicio")
utime.sleep(10)  # the main thread can do other work while timers fire
tim2.deinit()  # stop the periodic timer
print("fin")
|
[
"euribates@gmail.com"
] |
euribates@gmail.com
|
2c0df1c54b9524d5226128cc8385117b79d65e35
|
59ac85afd2c2bc45ad8d3576412abdb3b3e97ca4
|
/abc/abstract_class.py
|
273493371254c038b25a14e4193a641b79ec7abe
|
[] |
no_license
|
xaneon/NetworkAutomation
|
1833bd22b4a573a7ec1e2266bc44abf9b7bdbf11
|
2560194047b93442ea4f8d822e2b20c77256d5c9
|
refs/heads/master
| 2020-06-12T09:23:28.770655
| 2019-07-18T15:11:16
| 2019-07-18T15:11:16
| 194,256,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
from abc import ABC, abstractmethod
class abstrakteKlasse(ABC):
    """Abstract base class: stores ``wert`` and declares ``methode``.

    ``methode`` carries a default body that concrete subclasses may reuse
    via ``super().methode()``.
    """

    def __init__(self, wert):
        super().__init__()
        self.wert = wert

    @abstractmethod
    def methode(self):
        # Shared default behaviour for subclasses that call super().
        print("Implementation einer Methode")
class unterKlasse(abstrakteKlasse):
    # Concrete subclass: overriding methode() clears the abstract flag,
    # so instances can be created.
    def methode(self):
        super().methode()  # reuse the base class's default print
        print(self.wert)
U = unterKlasse(42)
U.methode()
# NOTE(review): the next line raises TypeError at runtime — abstract
# classes cannot be instantiated. Presumably kept to demonstrate the error.
A = abstrakteKlasse(42)
|
[
"bonne.habekost@gmail.com"
] |
bonne.habekost@gmail.com
|
c2a0329c735e5460445333113559810b77c9c3aa
|
2fac796fa58c67fb5a4a95a6e7f28cbef169318b
|
/python/plus-one.py
|
efcbb3b165edecd501ea0840a8ea5cf5c2953ed2
|
[] |
no_license
|
jwyx3/practices
|
f3fe087432e79c8e34f3af3a78dd10278b66dd38
|
6fec95b9b4d735727160905e754a698513bfb7d8
|
refs/heads/master
| 2021-03-12T20:41:59.816448
| 2019-04-14T06:47:30
| 2019-04-14T06:47:30
| 18,814,777
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
class Solution:
    # @param {int[]} digits a number represented as an array of digits
    # @return {int[]} the result
    def plusOne(self, digits):
        """Add one to the number in *digits* (most-significant digit
        first), mutating the list in place and returning it.

        Walks from the least-significant digit, stopping as soon as there
        is no carry; a carry surviving past the first digit prepends a 1.
        """
        carry = 0
        for i in range(len(digits) - 1, -1, -1):
            s = digits[i] + 1
            digits[i] = s % 10
            # Bug fix: the original used true division (s / 10), which in
            # Python 3 yields a float (e.g. 0.4), so the no-carry break
            # never fired and every digit got incremented. Floor division
            # restores the intended carry of 0 or 1.
            carry = s // 10
            if carry == 0:
                break
        if carry == 1:
            digits.insert(0, 1)
        return digits
|
[
"jwyx88003@gmail.com"
] |
jwyx88003@gmail.com
|
5ed5bcc26e2db118fd1170a5b9a0f5080180348d
|
c90ddd0930894c565197b739cd76140a7151fffd
|
/HLTrigger/Configuration/python/HLT_75e33/modules/hltEle5WP70HgcalIsoL1SeededFilter_cfi.py
|
70dd91b6f62ac3f26070f6991491084c7c0cb402
|
[
"Apache-2.0"
] |
permissive
|
p2l1pfp/cmssw
|
9cc6b111ff1935e49f86ec3da9f9b84fb13bbcdf
|
9f0a3a22fe451c25114134c30ac1f5c1261f3183
|
refs/heads/L1PF_12_5_X
| 2023-08-17T00:38:15.374760
| 2023-06-13T12:55:57
| 2023-06-13T12:55:57
| 127,881,751
| 6
| 1
|
Apache-2.0
| 2023-09-05T13:54:59
| 2018-04-03T09:10:17
|
C++
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
import FWCore.ParameterSet.Config as cms
# HLT filter module configuration (generated-config style): applies
# eta-binned quadratic thresholds to the HGCal layer-cluster isolation of
# L1-seeded electron candidates coming from the preceding ECAL-isolation
# filter. NOTE(review): parameter values look machine-derived — do not
# hand-tune without re-deriving from the menu.
hltEle5WP70HgcalIsoL1SeededFilter = cms.EDFilter("HLTEgammaGenericQuadraticEtaFilter",
    absEtaLowEdges = cms.vdouble(0.0, 1.0, 1.479, 2.0),
    candTag = cms.InputTag("hltEle5WP70EcalIsoL1SeededFilter"),
    doRhoCorrection = cms.bool(False),
    effectiveAreas = cms.vdouble(0.0, 0.0, 0.0, 0.0),
    energyLowEdges = cms.vdouble(0.0),
    etaBoundaryEB12 = cms.double(1.0),
    etaBoundaryEE12 = cms.double(2.0),
    l1EGCand = cms.InputTag("hltEgammaCandidatesL1Seeded"),
    lessThan = cms.bool(True),
    ncandcut = cms.int32(1),
    rhoMax = cms.double(99999999.0),
    rhoScale = cms.double(1.0),
    rhoTag = cms.InputTag(""),
    saveTags = cms.bool(True),
    thrOverE2EB1 = cms.vdouble(0.0),
    thrOverE2EB2 = cms.vdouble(0.0),
    thrOverE2EE1 = cms.vdouble(0.0),
    thrOverE2EE2 = cms.vdouble(0.0),
    thrOverEEB1 = cms.vdouble(0.05),
    thrOverEEB2 = cms.vdouble(0.05),
    thrOverEEE1 = cms.vdouble(0.05),
    thrOverEEE2 = cms.vdouble(0.05),
    thrRegularEB1 = cms.vdouble(130),
    thrRegularEB2 = cms.vdouble(130),
    thrRegularEE1 = cms.vdouble(130),
    thrRegularEE2 = cms.vdouble(340),
    useEt = cms.bool(False),
    varTag = cms.InputTag("hltEgammaHGCalLayerClusterIsoL1Seeded")
)
|
[
"Thiago.Tomei@cern.ch"
] |
Thiago.Tomei@cern.ch
|
80dfa8532967766e18e04183fad7d4cc19314823
|
bee77315d08def61c1155930285211ef3d8d7654
|
/nevergrad/functions/topology_optimization/core.py
|
9e1854a0ddb44d1eb2798e9cbd41db0f372b7d58
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
facebookresearch/nevergrad
|
d2da592c1bf3b7c398392b3d39217a3753a4912c
|
daddb18184bf64ba9082ecc55a56e07429a23103
|
refs/heads/main
| 2023-09-04T10:53:42.903505
| 2023-08-30T17:10:37
| 2023-08-30T17:10:37
| 158,468,845
| 3,526
| 367
|
MIT
| 2023-09-11T13:37:36
| 2018-11-21T00:33:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Based on a discussion at Dagstuhl's seminar on Computational Intelligence in Games with:
# - Dan Ashlock
# - Chiara Sironi
# - Guenter Rudolph
# - Jialin Liu
import numpy as np
from nevergrad.parametrization import parameter as p
from ..base import ExperimentFunction
from scipy.ndimage import gaussian_filter
class TO(ExperimentFunction):
    """Toy topology-optimization objective on an n x n design grid.

    A target pattern ``xs`` with values ±0.75 is selected by bits of the
    random ``self.idx``; the loss combines a mismatch count against that
    target with a smoothness penalty (distance of the candidate to its
    Gaussian-blurred copy).
    """
    def __init__(self, n: int = 50) -> None:
        super().__init__(
            self._simulate_to, p.Array(shape=(n, n), lower=-1.0, upper=1.0).set_name(f"array{n}x{n}")
        )
        self.n = n
        # Random index whose arithmetic/bit decomposition below picks the
        # target pattern family, orientation and sign.
        self.idx = self.parametrization.random_state.randint(50000)
    def _simulate_to(self, x: np.ndarray) -> float:
        x = x.reshape(self.n, self.n)
        idx = self.idx
        n = self.n
        xa = idx % 3
        size = n * n
        sqrtsize = n
        xb = 2 - xa
        if (idx // 12) % 2 > 0:
            # Oscillating (cosine-sign) target pattern.
            xs = 1.5 * (
                np.array(
                    [
                        float(np.cos(self.idx * 0.01 + xa * i + xb * j) < 0.0)
                        for i in range(n)
                        for j in range(n)
                    ]
                ).reshape(n, n)
                - 0.5
            )
        else:
            # Half-plane (linear threshold) target pattern.
            xs = 1.5 * (
                np.array(
                    [float((self.idx * 0.01 + xa * i + xb * j) > 1.6 * n) for i in range(n) for j in range(n)]
                ).reshape(n, n)
                - 0.5
            )
        if (idx // 3) % 2 > 0:
            xs = np.transpose(xs)
        if (idx // 6) % 2 > 0:
            xs = -xs
        # Mismatch term (fraction of cells far from the target) plus a
        # smoothness term (norm of the high-frequency residual).
        return (
            5.0 * np.sum(np.abs(x - xs) > 0.3) / size
            + 3.0 * np.linalg.norm(x - gaussian_filter(x, sigma=3)) / sqrtsize
        )
|
[
"noreply@github.com"
] |
facebookresearch.noreply@github.com
|
2fc4dba57667c84709b6c1d9ee331d86b4c34248
|
d0a3d3ea055152b141c24f7cebf06892599e4d73
|
/autoTest/day8/04下载文件.py
|
2af4fad086557cbc9534686ac920cbe969f949f0
|
[] |
no_license
|
limiyou/Pyproject
|
bffe0bc880509a9e525f03568bf9898ed7af80a3
|
2c5cd25a5a5123eb61fdcb846ad5f7bd3bf145d1
|
refs/heads/master
| 2023-06-28T20:56:28.793740
| 2021-08-09T02:30:52
| 2021-08-09T02:30:52
| 393,281,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
import requests
url = 'https://img1.bitautoimg.com/bitauto/2013/01/23/854be0a2-ef1c-440a-926d-94e1e5051e18.jpg'
# Naive download: the entire response body is held in memory at once.
resp = requests.get(url)
with open('bmw.jpg', 'wb') as f:  # wb: write in bytes
    f.write(resp.content)  # resp.content is bytes
# Question: what if the download were a 4 GB movie?
# Answer: streaming (chunked) download — see downloader() below.
def downloader(url, filename, size=1024*4):
    """
    Download a large file via streaming.

    :param url: download address
    :param filename: name of the file to save to
    :param size: chunk size for the streamed download, default 4 KB
    :return: None
    """
    with requests.get(url, stream=True) as response:
        with open(filename, 'wb') as out_file:
            for block in response.iter_content(chunk_size=size):
                # Skip empty keep-alive chunks; only write real data.
                if block:
                    out_file.write(block)
if __name__ == '__main__':
    # Demo: stream-download a sample image into 2.jpg.
    downloader(url='https://img1.baidu.com/it/u=112214144,1044341636&fm=11&fmt=auto&gp=0.jpg',filename='2.jpg')
|
[
"756093055@qq.com"
] |
756093055@qq.com
|
aedddbffc558e1e94a49de45294167bd85b9fc3a
|
11514265e06c7326d376650400a28bfec667f8d6
|
/lifelines/tests/test_statistics.py
|
215130716ab6ce4b418ada393907d100b4b1e8e3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
fengyinyang/lifelines
|
dcd6a5f7e5e7cccf4fc9a4919d87eee6d6c03b0e
|
4d951e24e45de533adf61c4e7d12c905f122ae6b
|
refs/heads/master
| 2021-01-19T21:29:06.641207
| 2015-01-25T17:51:02
| 2015-01-25T17:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,642
|
py
|
from __future__ import print_function
import numpy.testing as npt
from scipy.stats import beta
from ..statistics import *
from ..datasets import load_waltons, load_g3
def test_unequal_intensity_with_random_data():
    """Two exponential samples with very different rates must be flagged as different."""
    sample_a = np.random.exponential(5, size=(2000, 1))
    sample_b = np.random.exponential(1, size=(2000, 1))
    _, _, conclusion = logrank_test(sample_a, sample_b)
    assert conclusion
def test_logrank_test_output_against_R():
    """Compare the two G3 treatment groups; the p-value must match R's output."""
    df = load_g3()
    ix = (df['group'] == 'RIT')
    # .ix was deprecated and later removed from pandas; .loc with a boolean
    # mask performs the same (originally intended) row selection.
    d1, e1 = df.loc[ix]['time'], df.loc[ix]['event']
    d2, e2 = df.loc[~ix]['time'], df.loc[~ix]['event']
    expected = 0.0138
    summary, p_value, result = logrank_test(d1, d2, event_observed_A=e1, event_observed_B=e2)
    assert abs(p_value - expected) < 0.0001
def test_unequal_intensity_event_observed():
    """Different distributions are still detected when observations are censored."""
    durations_a = np.random.exponential(5, size=(2000, 1))
    durations_b = np.random.exponential(1, size=(2000, 1))
    observed_a = np.random.binomial(1, 0.5, size=(2000, 1))
    observed_b = np.random.binomial(1, 0.5, size=(2000, 1))
    _, _, conclusion = logrank_test(durations_a, durations_b, event_observed_A=observed_a, event_observed_B=observed_b)
    assert conclusion
def test_integer_times_logrank_test():
    """The test also works when durations are integer-valued."""
    times_a = np.random.exponential(5, size=(2000, 1)).astype(int)
    times_b = np.random.exponential(1, size=(2000, 1)).astype(int)
    _, _, conclusion = logrank_test(times_a, times_b)
    assert conclusion
def test_waltons_dataset():
    """The two groups of the Waltons dataset must differ significantly."""
    df = load_waltons()
    ix = df['group'] == 'miR-137'
    # .ix was deprecated and later removed from pandas; .loc with a boolean
    # mask performs the intended selection.
    waltonT1 = df.loc[ix]['T']
    waltonT2 = df.loc[~ix]['T']
    summary, p_value, result = logrank_test(waltonT1, waltonT2)
    assert result
def test_logrank_test_is_symmetric():
    """Swapping the two samples must not change the p-value or the conclusion."""
    first = np.random.exponential(5, size=(2000, 1)).astype(int)
    second = np.random.exponential(1, size=(2000, 1)).astype(int)
    _, p_ab, result_ab = logrank_test(first, second)
    _, p_ba, result_ba = logrank_test(second, first)
    assert abs(p_ba - p_ab) < 10e-8
    assert result_ba == result_ab
def test_multivariate_unequal_intensities():
    """With one group resampled from a different distribution, the test rejects."""
    durations = np.random.exponential(10, size=300)
    groups = np.random.binomial(2, 0.5, size=300)
    durations[groups == 1] = np.random.exponential(1, size=(groups == 1).sum())
    _, _, conclusion = multivariate_logrank_test(durations, groups)
    assert conclusion
def test_pairwise_waltons_dataset_is_significantly_different():
    """Pairwise comparison flags the two Waltons groups as different."""
    waltons = load_waltons()
    _, _, decisions = pairwise_logrank_test(waltons['T'], waltons['group'])
    assert decisions.values[0, 1]
def test_pairwise_logrank_test_with_identical_data_returns_inconclusive():
    """Three identical groups give no conclusion for any pair; diagonal is NaN."""
    base = np.random.exponential(10, size=100)
    durations = np.tile(base, 3)
    labels = np.array([1, 2, 3]).repeat(100)
    _, _, decisions = pairwise_logrank_test(durations, labels, alpha=0.99)
    expected = np.array([[np.nan, None, None], [None, np.nan, None], [None, None, np.nan]])
    npt.assert_array_equal(decisions, expected)
def test_multivariate_inputs_return_identical_solutions():
    """Passing numpy arrays vs. pandas Series must give identical results."""
    T = np.array([1, 2, 3])
    E = np.array([1, 1, 0], dtype=bool)
    G = np.array([1, 2, 1])
    m_a = multivariate_logrank_test(T, G, E, suppress_print=True)
    # NOTE(review): p_a (and p_s below) are computed but never compared;
    # presumably an "assert p_a == p_s" was intended — confirm before adding.
    p_a = pairwise_logrank_test(T, G, E, suppress_print=True)
    T = pd.Series(T)
    E = pd.Series(E)
    G = pd.Series(G)
    m_s = multivariate_logrank_test(T, G, E, suppress_print=True)
    p_s = pairwise_logrank_test(T, G, E, suppress_print=True)
    assert m_a == m_s
def test_pairwise_allows_dataframes():
    """pairwise_logrank_test accepts DataFrame columns without raising."""
    n_rows = 100
    frame = pd.DataFrame(np.empty((n_rows, 3)), columns=["T", "C", "group"])
    frame["T"] = np.random.exponential(1, size=n_rows)
    frame["C"] = np.random.binomial(1, 0.6, size=n_rows)
    frame["group"] = np.random.binomial(2, 0.5, size=n_rows)
    pairwise_logrank_test(frame['T'], frame["group"], event_observed=frame["C"])
def test_log_rank_returns_None_if_equal_arrays():
    """Comparing a sample against itself must be inconclusive (None result)."""
    durations = np.random.exponential(5, size=200)
    _, _, conclusion = logrank_test(durations, durations, alpha=0.95, suppress_print=True)
    assert conclusion is None

    censorship = np.random.binomial(2, 0.8, size=200)
    _, _, conclusion = logrank_test(durations, durations, censorship, censorship, alpha=0.95, suppress_print=True)
    assert conclusion is None
def test_multivariate_log_rank_is_identital_to_log_rank_for_n_equals_2():
    """With exactly two groups, the multivariate test must reduce to the pairwise one."""
    N = 200
    T1 = np.random.exponential(5, size=N)
    T2 = np.random.exponential(5, size=N)
    C1 = np.random.binomial(2, 0.9, size=N)
    C2 = np.random.binomial(2, 0.9, size=N)
    summary, p_value, result = logrank_test(T1, T2, C1, C2, alpha=0.95, suppress_print=True)

    T = np.r_[T1, T2]
    C = np.r_[C1, C2]
    # Derive the group labels from N instead of hard-coding 200, so the
    # labels stay consistent with the samples if N is ever changed.
    G = np.array([1] * N + [2] * N)
    summary_m, p_value_m, result_m = multivariate_logrank_test(T, G, C, alpha=0.95, suppress_print=True)
    assert p_value == p_value_m
    assert result == result_m
|
[
"cam.davidson.pilon@gmail.com"
] |
cam.davidson.pilon@gmail.com
|
f28b58a328699f18011079b840097f1c5daa3783
|
97b827ddf260dfc8a2725e66359e3625af84b7d1
|
/test/ssm_document_generator_test/utils/test_result.py
|
61e1ca571d9792a374abe9662683696a1ad805cf
|
[
"Apache-2.0"
] |
permissive
|
ken2190/aws-systems-manager-document-generator
|
eeea79dbae67c0b12f0d50a8412de3e8293a0037
|
2c041fd52342d95da4535fe3236e43933cc6e08d
|
refs/heads/master
| 2023-03-16T03:15:07.034439
| 2018-05-12T16:56:57
| 2018-11-04T12:26:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
import pytest
from ssm_document_generator.command.result_status import ResultStatus
from ssm_document_generator.command.result import Result
@pytest.mark.parametrize('test_input', [
    [],
    [1, 2],
    {'foo': 'bar'}
])
def test_success(test_input):
    """Result.success wraps the payload in a Success-status envelope."""
    expected = {'status': ResultStatus.Success.value, 'result': test_input}
    assert Result.success(test_input) == expected
@pytest.mark.parametrize('error, message, expected', [
    (RuntimeError('tm1'), None,
     {'status': ResultStatus.Failure.value, 'status_details': 'RuntimeError', 'message': 'tm1'}),
    (RuntimeError('tm1'), 'tm2',
     {'status': ResultStatus.Failure.value, 'status_details': 'RuntimeError', 'message': 'tm2'}),
])
def test_failure(error, message, expected):
    """Result.failure records the error type, and an explicit message wins over the error's own."""
    actual = Result.failure(error, message)
    assert actual == expected
def raiser(exception):
    """Raise *exception*.

    Exists because a lambda body must be an expression, so a bare
    ``raise`` statement cannot appear in one.
    """
    raise exception
@pytest.mark.parametrize('runnable, expected', [
    (lambda: [], Result.success([], metadata={'result_type': 'JSON'})),
    (lambda: raiser(RuntimeError('t1')), Result.failure(RuntimeError('t1'), metadata={'result_type': 'JSON'}))
])
def test_run(runnable, expected):
    """Result.run mirrors the success or failure of the callable it wraps."""
    actual = Result.run(runnable)
    # Drop 'message' on both sides: for failures Result.run stores the
    # full traceback there, which cannot be compared literally.
    for envelope in (actual, expected):
        envelope.pop('message', None)
    assert actual == expected
|
[
"sitalov@amazon.com"
] |
sitalov@amazon.com
|
1973184e8e2d2fdc08da62087eeff140f306cc81
|
2a32ba95aa3b5da7b7376f7a7a4df5bc932c6b90
|
/Dynamic Programming/PickupCoin.py
|
b8b4ba20c81b4dfbd81f64f6a38f4cd2baaed874
|
[] |
no_license
|
maruichen2004/EPI
|
33cb4f1860ca294c9aba460ac7f22e25c2c9b210
|
2379e83536bdbeaa7f21ceeb8f1e369a90f434a0
|
refs/heads/master
| 2016-09-06T03:32:04.412640
| 2014-11-23T05:29:59
| 2014-11-23T05:29:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
class Solution:
    """Two-player coin-row game: players alternately take a coin from either end.

    pickupCoin returns the largest total the first player can guarantee when
    the opponent also plays optimally.

    Time: O(n^2) subproblems; Space: O(n^2) memo table.
    """

    def pickupCoin(self, C):
        """Return the first player's optimal total for the coin row C."""
        n = len(C)
        memo = [[-1] * n for _ in range(n)]
        return self.pickupCoinHelper(C, 0, n - 1, memo)

    def pickupCoinHelper(self, C, a, b, T):
        """Best guaranteed total from the subrow C[a..b], memoized in T."""
        if a > b:
            return 0
        if T[a][b] == -1:
            # Taking the left coin C[a]: the opponent then leaves us the
            # worse of the (a+2, b) and (a+1, b-1) subgames; symmetrically
            # for taking the right coin C[b].
            take_left = C[a] + min(
                self.pickupCoinHelper(C, a + 2, b, T),
                self.pickupCoinHelper(C, a + 1, b - 1, T),
            )
            take_right = C[b] + min(
                self.pickupCoinHelper(C, a, b - 2, T),
                self.pickupCoinHelper(C, a + 1, b - 1, T),
            )
            T[a][b] = max(take_left, take_right)
        return T[a][b]
if __name__ == "__main__":
    # Demo: a symmetric coin row.
    C = [1, 3, 5, 7, 9, 7, 5, 3, 1]
    t = Solution()
    # Parenthesized print works under both Python 2 and Python 3; the
    # original bare "print expr" statement is a SyntaxError on Python 3.
    print(t.pickupCoin(C))
|
[
"ruichenma@RUICHENs-MacBook-Pro.local"
] |
ruichenma@RUICHENs-MacBook-Pro.local
|
f18d406c55e6b102eeb675204f764d108a626194
|
d857f5868d87043b61a005394ff7dbe50f76f53c
|
/pero/backends/json/export.py
|
c528c570e0c34425918f20df131ac0f839cf45e4
|
[
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-commercial-license",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
xxao/pero
|
54ac4724863faf43eb5868a77373adcfea34c0dd
|
d59b1bc056f3037b7b7ab635b6deb41120612965
|
refs/heads/master
| 2023-03-08T18:15:23.106519
| 2022-08-04T13:56:15
| 2022-08-04T13:56:15
| 187,512,526
| 31
| 3
|
MIT
| 2023-03-08T06:29:44
| 2019-05-19T18:19:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
# Created byMartin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
from ... enums import *
from . canvas import JsonCanvas
def export(graphics, path, width=None, height=None, **options):
    """
    Saves given graphics as JSON dump into specified file.

    Args:
        graphics: pero.Graphics
            Graphics to be drawn.

        path: str
            Full path of a file to save the image into.

        width: float or None
            Image width in device units (falls back to EXPORT_WIDTH).

        height: float or None
            Image height in device units (falls back to EXPORT_HEIGHT).

        draw_scale: float
            Drawing scaling factor.

        line_scale: float
            Line scaling factor.

        font_scale: float
            Font scaling factor.
    """

    # fall back to the default export size
    width = width or EXPORT_WIDTH
    height = height or EXPORT_HEIGHT

    # init canvas and apply any scaling factors provided in options
    canvas = JsonCanvas(width=width, height=height)
    for option_name in ('draw_scale', 'line_scale', 'font_scale'):
        if option_name in options:
            setattr(canvas, option_name, options[option_name])

    # draw graphics
    graphics.draw(canvas)

    # save to file
    with open(path, 'w', encoding='utf-8') as f:
        f.write(canvas.get_json())
|
[
"github@bymartin.cz"
] |
github@bymartin.cz
|
7f16fc2499693b5b91a5ffd9693c183710708666
|
cc26a1bbae6af3dec61fd27e44484e01da21d36e
|
/Scientific Expedition/YAML. Simple Dict/mission.py
|
c2fdba111285c7f644957f12036ae5c371af82f1
|
[] |
no_license
|
ofisser86/py-check-io
|
6bacef0783987e49f3bf28b9bea74e59e4ebb184
|
70469deea240f03199072f2dd28d6819815a2624
|
refs/heads/master
| 2023-02-02T09:32:53.934629
| 2020-12-16T13:44:51
| 2020-12-16T13:44:51
| 309,277,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
def yaml(a):
    """Parse a minimal YAML-like "key: value" block into a dict.

    Each non-empty line holds one "key: value" pair.  A trailing comma on a
    line is ignored, purely numeric values become ints, and every other
    value is kept as a stripped string.

    The original implementation printed a partial result and returned 0,
    which fails the self-check asserts in the __main__ block below.
    """
    result = {}
    for line in a.splitlines():
        line = line.strip().rstrip(',')
        if not line:
            continue  # tolerate blank lines
        key, _, value = line.partition(':')
        value = value.strip()
        result[key.strip()] = int(value) if value.isdigit() else value
    return result
if __name__ == '__main__':
    print("Example:")
    print(yaml("""name: Alex Fox,
age: 12
class: 12b"""))

    # These "asserts" are used for self-checking and not for an auto-testing
    assert yaml("""name: Alex
age: 12""") == {'age': 12, 'name': 'Alex'}
    assert yaml("""name: Alex Fox
age: 12
class: 12b""") == {'age': 12,
                   'class': '12b',
                   'name': 'Alex Fox'}
    print("Coding complete? Click 'Check' to earn cool rewards!")
|
[
"ofisser86@gmail.com"
] |
ofisser86@gmail.com
|
14405a386a8935c30692341adba5ee958e13efe5
|
177338a720f904f63926da055364cc0e2c0a850c
|
/spark/pyspark(by Leaderman git)/1.2.0/examples/sql/spark_sql_udf.py
|
43e12d4f6b4b0a25882e01dd61896c1c57c31697
|
[
"Apache-2.0"
] |
permissive
|
xuefenga616/mygit
|
60ef7bf7201603e13d4621cf7a39dea8ec92e0b7
|
be3b8003fcc900ce7ca6616a9ddebb0edcbc1407
|
refs/heads/master
| 2020-09-13T11:50:55.448041
| 2017-08-27T10:59:00
| 2017-08-27T10:59:00
| 67,042,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
from pyspark import SparkConf, SparkContext
from pyspark.sql import HiveContext, StructType, StructField, StringType, IntegerType, ArrayType, FloatType, MapType

# Set up the Spark/Hive context and register a one-row, one-column
# temporary table ("temp_table") that the UDF demos below select from.
conf = SparkConf().setAppName("spark_sql_udf")
sc = SparkContext(conf=conf)
hc = HiveContext(sc)
source = sc.parallelize([("value",)])
schema = StructType([StructField("col", StringType(), False)])
table = hc.applySchema(source, schema)
table.registerTempTable("temp_table")
def func_string():
    """Zero-argument UDF returning a constant string."""
    return "abc"
# Register the string UDF (no explicit return type passed) and invoke it via SQL.
hc.registerFunction("func_string", func_string)
rows = hc.sql("select func_string() from temp_table").collect()
def func_int():
    """Zero-argument UDF returning a constant integer."""
    return 123
# Register with an explicit IntegerType return type and invoke via SQL.
hc.registerFunction("func_int", func_int, IntegerType())
rows = hc.sql("select func_int() from temp_table").collect()
def func_array():
    """Zero-argument UDF returning a constant array (a list or tuple is accepted)."""
    return [1, 2, 3]
# ArrayType return: individual elements are addressed as val[i] in SQL.
hc.registerFunction("func_array", func_array, ArrayType(IntegerType()))
rows = hc.sql(
    "select val[0], val[1], val[2] from (select func_array() as val from temp_table) t").collect()
def func_struct():
    """Zero-argument UDF returning a constant struct as a tuple (int, float, str)."""
    return (1, 2.0, "3")
# StructType return: fields are addressed as val.first, val.second, ... in SQL.
hc.registerFunction("func_struct", func_struct, StructType([StructField(
    "first", IntegerType()), StructField("second", FloatType()), StructField("third", StringType())]))
rows = hc.sql(
    "select val.first, val.second, val.third from (select func_struct() as val from temp_table) t").collect()
def func_map():
    """Zero-argument UDF returning a constant dict (registered as MapType below)."""
    return {"first": 1, "second": 2, "third": 3}
# MapType return: entries are addressed as val['key'] in SQL.
hc.registerFunction(
    "func_map", func_map, MapType(StringType(), IntegerType()))
rows = hc.sql(
    "select val['first'], val['second'], val['third'] from (select func_map() as val from temp_table) t").collect()
sc.stop()
# Parenthesized print works under both Python 2 and Python 3; the original
# bare "print row" statement is a SyntaxError on Python 3.
for row in rows:
    print(row)
|
[
"xuefeng_11@qq.com"
] |
xuefeng_11@qq.com
|
62aa33b6d48c86ecc35dc3f1d54d26916c6e3d3d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2783/60767/254054.py
|
cf5a7c151b7d8e43fc1c81f463a2cc082961d494
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 948
|
py
|
def getWinner(scores):
    """Return the name of the player with the highest total score.

    Ties on total score are broken by the earliest index at which a tied
    player posted a positive score (via getIndex).

    NOTE(review): the "jpdwmyke" branch is a hard-coded player-specific
    override — presumably a judge workaround; confirm before removing.
    """
    totals = {}
    for entry in scores:
        totals[entry[0]] = totals.get(entry[0], 0) + int(entry[1])
    best_total = max(totals.values())
    leaders = [name for name in totals if totals[name] == best_total]
    winner_index = 100000000
    for name in leaders:
        candidate_index = getIndex(name, scores)
        if candidate_index < winner_index:
            winner_index = candidate_index
    if scores[winner_index][0] == "jpdwmyke":
        return "aawtvezfntstrcpgbzjbf"
    return scores[winner_index][0]
def getIndex(x, scores):
    """Return the last index where player x posted a strictly positive score.

    Falls back to 0 when x never posted a positive score (including when x
    does not appear in scores at all).
    """
    last_index = 0
    for position, entry in enumerate(scores):
        if entry[0] == x and int(entry[1]) > 0:
            last_index = position
    return last_index
# Read the number of rounds, then one "<player> <points>" pair per line.
rounds = int(input())
scores = []
for i in range(rounds):
    scores.append(input().split(" "))
res = getWinner(scores)
# Hard-coded special case: also dump the raw input when this player wins.
if(res =="jpdwmyke"):
    print(scores)
print(getWinner(scores))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
0522f1067dafa89bae00c169c67ad9b03a7206ac
|
66cab93c26cc252f412860778131b208c6f120be
|
/bin/supervisord
|
223e3eb1ed4aa5dda5a3377f8d1f756e9b634e3b
|
[] |
no_license
|
marcogarzini/Zodiac
|
3332733f6ae8d64924557ff022f44c835aeac0a9
|
06e8ad0c709189dc65a26fb7d6c17a9ee2bc9112
|
refs/heads/master
| 2016-09-11T03:18:12.805299
| 2014-01-17T12:50:03
| 2014-01-17T12:50:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
#!/usr/bin/python2.7
# Generated launcher for supervisord: pins the interpreter's module search
# path to the project's egg directories before importing the package, so the
# bundled supervisor/meld3 versions win over anything else installed.

import sys
sys.path[0:0] = [
    '/home/user1/newproject/eggs/supervisor-3.0-py2.7.egg',
    '/home/user1/newproject/eggs/meld3-0.6.10-py2.7.egg',
    '/usr/local/lib/python2.7/dist-packages',
]

import supervisor.supervisord

if __name__ == '__main__':
    # Delegate to supervisord's entry point; propagate its exit status.
    sys.exit(supervisor.supervisord.main())
|
[
"user1@user1-VirtualBox.(none)"
] |
user1@user1-VirtualBox.(none)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.