blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fccd134ed2431e7cce33642e9fc7705ec4904734
|
9092e62932da86fb2af69e0529e4cbb082cfea22
|
/wifiName32Pwd63.py
|
9c30ac8432ecedb77930c68c8c6746ec52684028
|
[] |
no_license
|
FengZiQ/flushbonding
|
d09915ce4285530e3d082c0aaea029790ffbdd9d
|
5ce631c9d09790846a31332eb8e76460e5f3f08e
|
refs/heads/master
| 2020-04-01T22:29:13.256997
| 2019-06-05T02:25:14
| 2019-06-05T02:25:14
| 153,711,075
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
# coding=utf-8
# End-to-end provisioning test: 32-character SSID with a 63-character
# password (comments translated from Chinese; log strings kept as-is).
import time
from to_log import to_log
from QRCodeOfNetworkConfig import wifi_mode
from dmSupport import get_device_attribute
from configFile import data_for_networkTest, open_picture
from honorRouter import Configuration

rc = Configuration()
to_log('SSID长度32/密码长度63网络配置测试\n')
# Configure the router with a 32-char SSID and 63-char password (WPA2).
if rc.wc(name='123a'*8, pwd='12'*30 + 'abc', secure=2):
    # Generate the network-configuration QR code (SSID len 32 / pwd len 63).
    wifi_mode(name='123a'*8, pwd='12'*30 + 'abc', pr='usb', dh='dhcp')
    # Give the device time to join the network.
    time.sleep(15)
    # Current host time; note hyphens in the time part — presumably the
    # device reports its time in this same format (TODO confirm).
    nowTimestamp = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    # Fetch the device's reported attributes.
    da = get_device_attribute(data_for_networkTest.get('deviceNo'))
    # "Corrected" time: same stamp with the units digit of the minutes
    # bumped by one, to tolerate a minute rollover between the reads.
    correction_time = nowTimestamp[:-4] + str(int(nowTimestamp[-4]) + 1)
    # Device time must match host time (seconds ignored via [:-3]) or the
    # corrected minute.
    if da.get('time', 'failed')[:-3] == nowTimestamp[:-3] or da.get('time', 'failed')[:-3] == correction_time:
        if da.get('persist.net.type') == 'wifi' and da.get('persist.net.dhcp') == 'true':
            to_log('SSID长度32/密码长度63网络配置测试Pass\n')
            to_log('配网方式:'+da.get('persist.net.type', ''))
            to_log('DHCP:' + da.get('persist.net.dhcp', ''))
            to_log('IP:' + da.get('sys.net.ip', ''))
            to_log('MAC:' + da.get('system.net.wifi.mac', '') + '\n')
    else:
        # NOTE(review): indentation reconstructed — this branch is assumed
        # to pair with the time-match check above; confirm against the
        # original file.
        to_log('请检查断言参数\n')
        # Open the device information code image for manual inspection.
        open_picture('deviceInfoCode.png')
else:
    to_log('SSID长度32/密码长度63网络配置测试Failed\n')
rc.finished()
|
[
"feng1025352529@qq.com"
] |
feng1025352529@qq.com
|
16840e785de669798985dd9040d55e3037b2f01a
|
66a82c2eb7f9facff4cb0aa72f21a713dbb1cf61
|
/devices/SIP04_FZJ/01_Test/test_sip04_01.py
|
8f9e8dfee1412bdb8d75db1ffa146684f3c7300e
|
[
"MIT"
] |
permissive
|
geophysics-ubonn/reda_testing
|
894eefa8f5cddf288c639c00404c6bd12339dad7
|
c32f3faa685b77974b88ba1126a02afabfe5fd2d
|
refs/heads/master
| 2023-06-04T00:16:43.503287
| 2020-12-21T13:23:48
| 2020-12-21T13:23:48
| 110,421,246
| 0
| 1
|
NOASSERTION
| 2019-06-25T09:50:57
| 2017-11-12T09:50:26
|
Python
|
UTF-8
|
Python
| false
| false
| 314
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import reda
# Directory containing this test module, with a trailing path separator,
# used to locate the bundled Data/ fixtures.
basepath = os.path.dirname(__file__) + os.sep
def test_load_mat():
    """Smoke test: a SIP-04 .mat data file imports into a SIP container."""
    reda.SIP().import_sip04(basepath + 'Data/sip_dataA.mat')
def test_load_csv():
    """Smoke test: a SIP-04 .csv data file imports into a SIP container."""
    reda.SIP().import_sip04(basepath + 'Data/sip_dataA.csv')
|
[
"mweigand@geo.uni-bonn.de"
] |
mweigand@geo.uni-bonn.de
|
d94ad0d4184ebc4fb4df9f9e567f480fa0b69e93
|
5a7375bdcd7fba344d9d8e424c42e4ff6e58e5cd
|
/00_algo_prob/2529_ineuality.py
|
f007230e9a61a1f36461d2b4bf68aa212163e80e
|
[] |
no_license
|
jhee514/Algorithms
|
1d9d9f8bf11b957393ad1a169fa1a61f86d77da5
|
0ebed8f99a63eae2f9122033ab4e13b2b499fb52
|
refs/heads/master
| 2021-07-21T01:33:22.838431
| 2020-10-28T15:21:19
| 2020-10-28T15:21:19
| 226,996,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
import sys
# Redirect stdin to the puzzle input file so plain input() reads from it.
sys.stdin = open("2529_input.txt", "r")
"""
10개의 숫자 중에서 k 개를 순열로 가져와
부등호를 사이사이 넣어봐
중간에 가지치기 해주면서 쭉 돌아야
"""
# (Korean note above: take permutations of k+1 of the 10 digits, place the
# inequality signs between them, and prune while iterating.)
import itertools
def sol(k, data):
    """Solve one inequality-sequence case (problem 2529).

    Finds the largest and smallest sequences of k+1 distinct digits 0-9
    that satisfy the k inequality signs in `data`, and prints the maximum
    first, then the minimum.  Results are kept as strings so a leading
    zero is preserved (e.g. "021").

    :param k: number of inequality signs
    :param data: list of k strings, each '<' or '>'
    """
    min_num, max_num = 10 ** (k + 1), 0
    # Initialised so the prints cannot hit an unbound name even if no
    # permutation satisfies the signs (the problem guarantees one does).
    str_min = str_max = ''
    for p in itertools.permutations(range(10), k + 1):
        # Prune: bail out at the first violated inequality.
        for i in range(k):
            if data[i] == '>' and p[i] < p[i + 1]:
                break
            elif data[i] == '<' and p[i] > p[i + 1]:
                break
        else:
            # All k inequalities hold for this permutation.
            str_num = ''.join(str(d) for d in p)
            if int(str_num) < min_num:
                min_num = int(str_num)
                str_min = str_num
            if int(str_num) > max_num:
                max_num = int(str_num)
                # BUG FIX: was `str_max = max_num`, which stored the int
                # and dropped the string formatting kept for the minimum.
                str_max = str_num
    print(str_max)
    print(str_min)
# Number of test cases present in the redirected input file.
T = 2
for tc in range(T):
    # k: number of inequality signs; data: the k signs ('<' or '>').
    k = int(input())
    data = list(map(str, input().split()))
    sol(k, data)
|
[
"514kim@gmail.com"
] |
514kim@gmail.com
|
c8ae48a6f79a42bf74407f3d6801a041d64be011
|
6a63e40b1d30b6a810c89d910ac3f8f5954002ee
|
/src/pretalx/submission/migrations/0039_submission_created.py
|
c73cbfb2440b187adbb54d325d4ffb85e8724bf3
|
[
"Apache-2.0"
] |
permissive
|
orlando/pretalx
|
47b7ab3e3258d667183066b84227b785199711b2
|
15f90dc2545f210eaf870ffbdfe0a27c70bfa0ec
|
refs/heads/master
| 2020-09-10T20:26:49.867462
| 2019-11-15T01:19:07
| 2019-11-15T01:19:07
| 221,826,314
| 2
| 0
|
NOASSERTION
| 2019-11-15T02:21:05
| 2019-11-15T02:21:04
| null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Generated by Django 2.2.1 on 2019-05-01 20:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a nullable `created` timestamp
    # (auto_now_add) to Submission.  Left byte-identical — editing an
    # applied migration breaks Django's migration-state consistency.

    dependencies = [
        ('submission', '0038_auto_20190429_0750'),
    ]

    operations = [
        migrations.AddField(
            model_name='submission',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
|
[
"r@rixx.de"
] |
r@rixx.de
|
f2613ac43e286ee6c63cc7b579b00d0c613e1729
|
d532b85841b459c61d88d380e88dd08d29836d43
|
/solutions/1488_avoid_flood_in_the_city.py
|
1789aba0bebf606b5ccb155577af2e6cf7b5dc09
|
[
"MIT"
] |
permissive
|
YiqunPeng/leetcode_pro
|
ad942468df5506de9dc48a4019933f658e2a3121
|
4a508a982b125a3a90ea893ae70863df7c99cc70
|
refs/heads/master
| 2022-05-15T09:32:02.699180
| 2022-05-14T16:32:17
| 2022-05-14T16:32:17
| 182,453,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
class Solution:
    def avoidFlood(self, rains: List[int]) -> List[int]:
        """LeetCode 1488: choose which lake to dry on each rain-free day.

        Greedy with a hash table and a sorted list of unused dry days:
        when a lake is about to be rained on a second time, dry it on the
        earliest available dry day after it was last filled.

        Returns the answer array (-1 on rainy days, the dried lake id on
        dry days, 1 as a harmless filler), or [] if a flood is unavoidable.
        """
        answer = [1] * len(rains)
        last_filled = {}   # lake id -> most recent day it was filled
        dry_days = []      # ascending indices of not-yet-used dry days
        for day, lake in enumerate(rains):
            if lake <= 0:
                dry_days.append(day)
                continue
            if lake in last_filled:
                # Earliest unused dry day after the lake was last filled.
                pos = bisect.bisect_left(dry_days, last_filled[lake])
                if pos == len(dry_days):
                    return []  # no dry day available: flood
                answer[dry_days.pop(pos)] = lake
            last_filled[lake] = day
            answer[day] = -1
        return answer
|
[
"ypeng1@andrew.cmu.edu"
] |
ypeng1@andrew.cmu.edu
|
ffbdf922a169191795e21b24f226334344e6b2b8
|
8a08d39142c7b5c7dc9300717f0db6dad295ec92
|
/antelope_core/providers/parse_math.py
|
8fb0f24a50ac68f93528c7d0a658cd62da7d7e04
|
[
"BSD-3-Clause"
] |
permissive
|
msm-sardar/core
|
3eac85248914ada808882b9dedefd889756be504
|
bc88a1ed3e4c1defcbc83fa86356451ac34c178c
|
refs/heads/master
| 2023-08-24T03:56:31.892812
| 2021-10-14T01:12:02
| 2021-10-14T01:12:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
import ast
def parse_math(expression):
    """Safely evaluate a user-supplied arithmetic expression.

    The input is parsed to an AST and rejected unless every node is
    purely arithmetic (numeric literals plus unary/binary operators).
    Only then is it evaluated — with builtins stripped — so names,
    calls, attribute access etc. can never execute.

    :param expression: string such as "2 * (3 + 4.5)"
    :return: the numeric result, or None if the input is not a pure
        mathematical expression (including syntax errors)
    """
    try:
        tree = ast.parse(expression, mode='eval')
    except SyntaxError:
        return  # not a Python expression

    def _is_math_node(node):
        # ast.Constant replaces the deprecated ast.Num used previously;
        # accept only real numeric literals.  bool is an int subclass and
        # is excluded, matching the old ast.Num behaviour.
        if isinstance(node, ast.Constant):
            return (isinstance(node.value, (int, float, complex))
                    and not isinstance(node.value, bool))
        return isinstance(node, (ast.Expression, ast.UnaryOp, ast.unaryop,
                                 ast.BinOp, ast.operator))

    if not all(_is_math_node(node) for node in ast.walk(tree)):
        return  # not a mathematical expression (numbers and operators)
    # Builtins are disabled so even a crafted expression cannot reach them.
    return eval(compile(tree, filename='', mode='eval'), {'__builtins__': {}})
|
[
"brandon.kuczenski@301south.net"
] |
brandon.kuczenski@301south.net
|
19231243102cae313e9ffe1fb4aa503ac094635f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_permutation.py
|
995d0cfcfbff595c0f8b2d0a59d0d980653557db
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
#calss header
class _PERMUTATION():
def __init__(self,):
self.name = "PERMUTATION"
self.definitions = [u'any of the various ways in which a set of things can be ordered: ', u'one of several different forms: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
a2676e558ee7b10567e7d3604eccdaaab446eb0f
|
c1b7655fbbf5e647c9de01d55bf31f044e26b7bf
|
/HE_cell_classification/predict/predict_Local.py
|
58a36cc8ee6751d13abcac3b49b2f6dc8a825d63
|
[] |
no_license
|
sara-kassani/UNMaSk
|
ef170ddcfd7b8b5599e7d412d547084848308eb1
|
c03f56a6e926fe14b1923470d22a112892116e38
|
refs/heads/master
| 2023-07-17T12:38:46.086746
| 2021-04-29T19:59:48
| 2021-04-29T19:59:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,115
|
py
|
import os
from parse_arguments import get_parsed_arguments
from classifier.sccnn_classifier import SccnnClassifier
from classifier.subpackages import NetworkOptions

# Pin CUDA to the first GPU; comment the next two lines when running in a
# CPU-only environment.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

################################################################################################
# Argument naming used by this script:
#   exp_dir            -> checkpoint_path
#   data_dir           -> cws_path
#   result_dir         -> classification result_path
#   detection_dir      -> detection_path
#   tissue_segment_dir -> tissue_segmentation_result_path, if available
#                         (this parameter is optional)
################################################################################################
args = get_parsed_arguments()

# Network/runtime options for the H&E cell classifier: 51x51 RGB input
# patches, 4 output classes, batches of 100; epoch counters are dummies
# since this script only runs prediction.
opts = NetworkOptions.NetworkOptions(exp_dir=args.exp_dir,
                                     num_examples_per_epoch_train=1,
                                     num_examples_per_epoch_valid=1,
                                     image_height=51,
                                     image_width=51,
                                     in_feat_dim=3,
                                     in_label_dim=1,
                                     num_of_classes=4,
                                     batch_size=100,
                                     data_dir=args.data_dir,
                                     results_dir=args.results_dir,
                                     detection_results_path=args.detection_results_path,
                                     tissue_segment_dir=args.tissue_segment_dir,
                                     preprocessed_dir=None,
                                     current_epoch_num=0,
                                     file_name_pattern=args.file_name_pattern,
                                     pre_process=False,
                                     color_code_file='HE_Fib_Lym_Tum_Others.csv')

# All outputs go into a run-specific sub-directory.
opts.results_dir = (os.path.join(opts.results_dir, '2020ENS_TA_DUKE_HE_TEST'))
# Create the output tree: raw .mat results, annotated overlays, CSVs.
if not os.path.isdir(opts.results_dir):
    os.makedirs(opts.results_dir)
if not os.path.isdir(os.path.join(opts.results_dir, 'mat')):
    os.makedirs(os.path.join(opts.results_dir, 'mat'))
if not os.path.isdir(os.path.join(opts.results_dir, 'annotated_images')):
    os.makedirs(os.path.join(opts.results_dir, 'annotated_images'))
if not os.path.isdir(os.path.join(opts.results_dir, 'csv')):
    os.makedirs(os.path.join(opts.results_dir, 'csv'))

# Build the classifier and run prediction over all inputs.
Network = SccnnClassifier(batch_size=opts.batch_size,
                          image_height=opts.image_height,
                          image_width=opts.image_width,
                          in_feat_dim=opts.in_feat_dim,
                          in_label_dim=opts.in_label_dim,
                          num_of_classes=opts.num_of_classes)
#print(opts)
Network.generate_output(opts=opts)
|
[
"noreply@github.com"
] |
sara-kassani.noreply@github.com
|
84d3852ea9e37451d2df07cf5855edabe663ba12
|
754f71f70dfd6a22944d8d872c6d2f1d6983ac14
|
/tests/serial_frame_builder/test_miso_frame_builder.py
|
a78831ae697787e71eac1215a585a220bf59fbf5
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
Sensirion/python-shdlc-driver
|
052685da8db5629fa5929da65000210db82358e7
|
31e9683c27004ee05edf89996d656bc50f5bdb3a
|
refs/heads/master
| 2021-06-10T10:35:47.299481
| 2021-03-19T08:47:12
| 2021-03-19T08:47:12
| 144,961,065
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,667
|
py
|
# -*- coding: utf-8 -*-
# (c) Copyright 2019 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from sensirion_shdlc_driver.serial_frame_builder import \
ShdlcSerialMisoFrameBuilder
from sensirion_shdlc_driver.errors import ShdlcResponseError
import pytest
def test_initial_data_empty():
    """A fresh builder exposes "data" as an empty bytearray."""
    fb = ShdlcSerialMisoFrameBuilder()
    assert type(fb.data) is bytearray
    assert len(fb.data) == 0
def test_initial_start_received_false():
    """A fresh builder reports start_received == False, typed as bool."""
    fb = ShdlcSerialMisoFrameBuilder()
    assert type(fb.start_received) is bool
    assert fb.start_received is False
def test_add_data_appends():
    """Each add_data() call appends the passed bytes to "data"."""
    fb = ShdlcSerialMisoFrameBuilder()
    for chunk, expected in [
        (b"\x00\x01\x02", b"\x00\x01\x02"),
        (b"\x03\x04\x05", b"\x00\x01\x02\x03\x04\x05"),
        (b"\xfd\xfe\xff", b"\x00\x01\x02\x03\x04\x05\xfd\xfe\xff"),
    ]:
        fb.add_data(chunk)
        assert fb.data == expected
def test_add_data_raises_if_max_length_reached():
    """add_data() raises ShdlcResponseError when the maximum frame length
    is exceeded without a valid frame having been received."""
    fb = ShdlcSerialMisoFrameBuilder()
    fb.add_data(b"\x00" * 500)
    with pytest.raises(ShdlcResponseError):
        fb.add_data(b"\x00" * 23)
def test_add_data():
    """add_data() returns a bool that turns True once a complete frame
    (START 0x7E ... STOP 0x7E) has been received, and stays True."""
    fb = ShdlcSerialMisoFrameBuilder()
    assert type(fb.add_data(b"")) is bool
    assert fb.add_data(b"") is False
    assert fb.add_data(b"\x00\x01\x02") is False  # rubbish before the frame
    assert fb.add_data(b"\x7e\x00\x00") is False  # frame START seen
    assert fb.add_data(b"\x00\x00\x7e") is True   # frame STOP seen
    assert fb.add_data(b"\x00\x01\x02") is True   # trailing rubbish, still True
def test_initial_start_received():
    """start_received flips to True at the frame START byte and never
    reverts afterwards."""
    fb = ShdlcSerialMisoFrameBuilder()
    for chunk, expected in [
        (b"\x00\x01\x02", False),  # some rubbish
        (b"\x7e\x00\x00", True),   # frame START
        (b"\x00\x00\x7e", True),   # frame STOP
        (b"\x00\x01\x02", True),   # some rubbish
    ]:
        fb.add_data(chunk)
        assert fb.start_received is expected
# Valid-frame vectors: minimal frame, maximum payload (255 bytes),
# all-0xFF fields, and 0x7D byte stuffing in header, data and checksum.
@pytest.mark.parametrize("raw,exp_addr,exp_cmd,exp_state,exp_data", [
    pytest.param(b"\x7e\x00\x00\x00\x00\xff\x7e",
                 0x00,
                 0x00,
                 0x00,
                 b"",
                 id="all_zeros_nodata"),
    pytest.param(b"\x7e\x00\x00\x00\xff" + b"\x00" * 255 + b"\x00\x7e",
                 0x00,
                 0x00,
                 0x00,
                 b"\x00" * 255,
                 id="all_zeros_withdata"),
    pytest.param(b"\x7e\xff\xff\xff\xff" + b"\xff" * 255 + b"\x02\x7e",
                 0xFF,
                 0xFF,
                 0xFF,
                 b"\xff" * 255,
                 id="all_0xFF_withdata"),
    pytest.param(b"\x7e\x7d\x5e\x7d\x5d\x7d\x31\x03\x12\x7d\x33\x14\xb7\x7e",
                 0x7e,
                 0x7d,
                 0x11,
                 b"\x12\x13\x14",
                 id="byte_stuffing_in_address_command_state_and_data"),
    pytest.param(b"\x7e\x00\x01\x00\xff" + b"\x7d\x5e" * 255 + b"\x7d\x5d\x7e",
                 0x00,
                 0x01,
                 0x00,
                 b"\x7e" * 255,
                 id="byte_stuffing_in_data_and_checksum"),
])
def test_interpret_data_valid(raw, exp_addr, exp_cmd, exp_state, exp_data):
    """
    Test if return type and value of the "interpret_data()" method is correct.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    assert builder.add_data(raw) is True
    recv_addr, recv_cmd, recv_state, recv_data = builder.interpret_data()
    # The decoded fields must come back as plain ints and raw bytes.
    assert type(recv_addr) is int
    assert type(recv_cmd) is int
    assert type(recv_state) is int
    assert type(recv_data) is bytes
    assert recv_addr == exp_addr
    assert recv_cmd == exp_cmd
    assert recv_state == exp_state
    assert recv_data == exp_data
# Invalid-frame vectors: wrong length, length/payload mismatch and bad
# checksums — all must be rejected by interpret_data().
@pytest.mark.parametrize("raw", [
    pytest.param(b"\x7e\x7e",
                 id="empty"),
    pytest.param(b"\x7e\x00\x00\x00\xff\x7e",
                 id="too_short"),
    pytest.param(b"\x7e\x00\x00\x00\xff" + b"\x00" * 256 + b"\x00\x7e",
                 id="too_long"),
    pytest.param(b"\x7e\x00\x00\x00\x01\xfe\x7e",
                 id="too_less_data"),
    pytest.param(b"\x7e\x00\x00\x00\x00\x00\xff\x7e",
                 id="too_much_data"),
    pytest.param(b"\x7e\x00\x00\x00\x00\xfe\x7e",
                 id="nodata_wrong_checksum"),
    pytest.param(b"\x7e\xff\xff\xff\xff" + b"\xff" * 255 + b"\x00\x7e",
                 id="all_0xFF_wrong_checksum"),
])
def test_interpret_data_invalid(raw):
    """
    Test if "interpret_data()" raises an ShdlcResponseError on invalid data.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    assert builder.add_data(raw) is True
    with pytest.raises(ShdlcResponseError):
        builder.interpret_data()
|
[
"urban.bruhin@sensirion.com"
] |
urban.bruhin@sensirion.com
|
123b1cbb1b81c70709c950c532951eaeed017c86
|
1523e2fff267279bbf99a44a71b7482081dd1141
|
/The_Watch/The_Watch/wsgi.py
|
2cbac8cc2093ca421a722294adc2ee44bfc89a4c
|
[
"MIT"
] |
permissive
|
Kipngetich33/The-Watch
|
4c77f5e365553ab5af9b7a9c4a5bea71139d47c0
|
96e39937c0015eae749836f6215d60ae5cb86e51
|
refs/heads/master
| 2021-05-02T07:20:46.854250
| 2018-02-12T08:37:36
| 2018-02-12T08:37:36
| 120,872,467
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
"""
WSGI config for The_Watch project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "The_Watch.settings")
application = get_wsgi_application()
|
[
"khalifngeno@gmail.com"
] |
khalifngeno@gmail.com
|
6fc833e1360cd1461a185e6418da611f9ec80004
|
f10db3b11131ddf2bf5026e42cdd72c275e49693
|
/ToolsX/leetcode/0069/0069_4.py
|
fc86c98645998a5d503330fc7b69982f3ce3ac41
|
[] |
no_license
|
JunLei-MI/PythonX
|
36def40e33c9ebb64ce28af2b5da010393b08356
|
efea806d49f07d78e3db0390696778d4a7fc6c28
|
refs/heads/master
| 2023-04-07T10:58:45.647430
| 2021-01-25T16:54:37
| 2021-04-15T13:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return the integer square root (floor) of a non-negative int.

        Builds the answer bit by bit from the most significant candidate
        bit downwards: tentatively set each bit and keep it only while
        the running square does not exceed x.  O(log x).

        (Per the original notes, translated: a linear scan timed out,
        binary search works; this is the bit-manipulation variant —
        remember to verify x = 0 and x = 1.)
        https://leetcode.com/problems/sqrtx/discuss/25048/Share-my-O(log-n)-Solution-using-bit-manipulation

        >>> Solution().mySqrt(1060472158)
        32564
        """
        answer = 0
        # Start just above the highest possible bit of isqrt(x).  The
        # original hard-coded `1 << 15` (32-bit inputs only); deriving the
        # start bit from x.bit_length() generalizes to arbitrarily large x
        # while giving identical results for 32-bit inputs.
        bit = 1 << ((x.bit_length() + 1) // 2)
        while bit > 0:
            answer |= bit  # tentatively set this bit
            if answer * answer > x:
                # Too big with this bit set — clear it again (XOR works
                # because `bit` has exactly one bit set).
                answer ^= bit
            bit >>= 1
        return answer
if __name__ == '__main__':
    # Run the doctest embedded in mySqrt's docstring.
    import doctest
    doctest.testmod(verbose=True)
|
[
"pingfangx@pingfangx.com"
] |
pingfangx@pingfangx.com
|
3fbef31ab44f7f7928253701aacca5637318f44b
|
e267d1dbb7eb7cad239b18cffe6ddc53ae45aa9a
|
/tests/test_validators.py
|
f4ff4da249c4db81979e8286293a8a41471d1559
|
[] |
no_license
|
papercapp/DisposableEmailChecker
|
038fa91f60f2798d687ca846d5836200af30f624
|
60f055f9102a4f9e967d740e4446e5c7ac76c351
|
refs/heads/master
| 2020-05-29T08:52:05.319215
| 2015-11-04T02:00:38
| 2015-11-04T02:00:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from django.test import TestCase
from django.core.exceptions import ValidationError
from disposable_email_checker import validators
from disposable_email_checker.emails import email_domain_loader
class TestDisposableEmailValidator(TestCase):
    """The validator must reject an address on a known disposable domain
    and accept a regular address."""

    def setUp(self):
        # Pick a random domain from the bundled disposable-domain list.
        chosen_domain = random.choice(email_domain_loader())
        self.disposable_email = "fake.mcfakerston@{domain}".format(domain=chosen_domain)
        self.not_a_disposable_email = "sergey.brin@google.com"

    def test_validator(self):
        # Disposable address raises ValidationError...
        self.assertRaises(ValidationError, validators.validate_disposable_email, self.disposable_email)
        # ...while a normal address passes without raising.
        validators.validate_disposable_email(self.not_a_disposable_email)
|
[
"me@aaronbassett.com"
] |
me@aaronbassett.com
|
02aea388baeecdf450749332637825ef25ee1e47
|
dce2e3b11804fdb141feaa48299fa8cd751f0e5d
|
/1154.一年中的第几天.py
|
a811e8c92ef10d247014b84f42c7884b8caf4f93
|
[] |
permissive
|
Cosmos-Break/leetcode
|
bf056efb6f3eb6448df7fb3fc4869992a3e7eb48
|
9f5f3d24e35b0a482ed40594ea665e9068324dcc
|
refs/heads/main
| 2023-06-26T04:29:25.135826
| 2021-07-19T12:29:29
| 2021-07-19T12:29:29
| 293,397,157
| 0
| 0
|
MIT
| 2020-09-07T01:55:39
| 2020-09-07T01:55:38
| null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
#
# @lc app=leetcode.cn id=1154 lang=python3
#
# [1154] 一年中的第几天
#
# @lc code=start
class Solution:
    def dayOfYear(self, data: str) -> int:
        """LeetCode 1154: return the 1-based ordinal day of the year for
        an ISO date string "YYYY-MM-DD"."""
        y, m, d = int(data[:4]), int(data[5:7]), int(data[8:])
        # Gregorian leap-year rule, equivalent to the usual form.
        leap = y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
        month_days = (31, 29 if leap else 28, 31, 30, 31, 30,
                      31, 31, 30, 31, 30, 31)
        return d + sum(month_days[:m - 1])
# @lc code=end
|
[
"438854233@qq.com"
] |
438854233@qq.com
|
15c60558a5d48ed336761321bdefd509bf9ccd07
|
3185dc605853fdaf942fd06e206225793b198638
|
/剑指offer/No20_表示数值的字符串.py
|
a60b824774b72020c1d40e4aef19394be63143f8
|
[] |
no_license
|
fank-cd/books_exercise_code
|
cb81ee8ec8167a5f5e3bfc58d3c1d6d931ca9286
|
1e8109adb82f741df1203658d4bf272f09a651b8
|
refs/heads/master
| 2021-07-11T01:15:11.980179
| 2020-06-29T04:01:53
| 2020-06-29T04:01:53
| 156,671,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
# 面试题20:表示数值的字符串
# 题目:请实现一个函数用来判断字符串是否表示数值(包括整数和小数)。
# 例如,字符串"+100"、"5e2"、"-123"、"3.1416"、及"-1E-16"都表示
# 数值,但"12E"、"1a3.14"、"1.2.3"、"+-5"及"12e+5.4"都不是。
# # 读不懂题意,留下代码,暂时空着
def is_numeric(string):
    """Return True if `string` represents a numeric value.

    Accepts forms such as "+100", "5e2", "-123", "3.1416" and "-1E-16";
    rejects "12E", "1a3.14", "1.2.3", "+-5", "12e+5.4" and non-strings.
    (Sword-offer problem 20; original commentary translated from Chinese.)
    """
    if not isinstance(string, str):
        return False
    ok, pos = scan_integer(string, 0)
    # Optional fractional part ".digits" — digits before the dot are
    # optional, so ".5" counts as numeric.
    if pos < len(string) and string[pos] == '.':
        frac_ok, pos = scan_unsigned_integer(string, pos + 1)
        ok = ok or frac_ok
    # Optional exponent: e/E followed by a (signed) integer.
    if pos < len(string) and string[pos] in ('e', 'E'):
        exp_ok, pos = scan_integer(string, pos + 1)
        ok = ok and exp_ok
    # Valid only if something matched and the whole string was consumed.
    return ok and pos == len(string)

def scan_integer(string, index):
    """Scan an optionally signed integer at `index`; return (found, new_index)."""
    if index < len(string) and string[index] in ('-', '+'):
        index += 1
    return scan_unsigned_integer(string, index)

def scan_unsigned_integer(string, index):
    """Scan a run of decimal digits at `index`; return (found, new_index)."""
    start = index
    while index < len(string) and string[index] in '0123456789':
        index += 1
    return index != start, index
if __name__ == "__main__":
print(is_numeric("+100"))
print(is_numeric("5e2"))
print(is_numeric("-200"))
print(is_numeric("3.1415926"))
print(is_numeric("1.34e-2"))
print(is_numeric("1.34e"))
|
[
"2464512446@qq.com"
] |
2464512446@qq.com
|
35c16b5dd609e24fbc243144ddcb65eef3a54569
|
71aea3429ecb5b4ccf415078809654b6e97c2cb6
|
/server/config.py
|
f91344f2181cace25b677f057fdaf6951c423276
|
[
"MIT"
] |
permissive
|
Nukesor/spacesurvival
|
dcbb8f0441c23367cd4c32beb260e336d8de06a7
|
1b02f2027f172ebbbf4f944641b7f0b5d0b5bb92
|
refs/heads/master
| 2021-01-19T09:27:03.809556
| 2017-12-04T13:03:17
| 2017-12-04T13:03:17
| 82,110,806
| 2
| 0
| null | 2017-11-20T13:16:30
| 2017-02-15T21:54:37
|
Rust
|
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
"""Various configs for different environments."""
from datetime import timedelta
class BaseConfig:
"""Base config."""
DEBUG = False
SECRET_KEY = 'lolololol'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame'
AUTH_TOKEN_TIMEOUT = timedelta(days=365)
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = 'username'
MAIL_PASSWORD = 'password'
PASSLIB_SCHEMES = ["argon2"]
SECURITY_CONFIRMABLE = True
SECURITY_TRACKABLE = True
MODULE_FILE_PATH = "server/data/module_data.json"
RESEARCH_FILE_PATH = "server/data/research_data.json"
CORS_ALLOW_ORIGIN = ''
CORS_ALLOW_METHODS = ''
CORS_ALLOW_HEADERS = ''
class DevConfig(BaseConfig):
"""Develop config."""
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame-dev'
class TestConfig(BaseConfig):
"""Testing config."""
SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame-test'
class ProdConfig(BaseConfig):
"""Production config."""
SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame'
AUTH_TOKEN_TIMEOUT = 30 * 12 * 30 * 24 * 3600
configs = {
'develop': DevConfig,
'testing': TestConfig,
'production': ProdConfig,
}
|
[
"arne@twobeer.de"
] |
arne@twobeer.de
|
94f7bb0c107ba916893a8ac8be11f4eaab3b3588
|
f1738cd603e0b2e31143f4ebf7eba403402aecd6
|
/ucs/base/univention-updater/conffiles/15_ucs-online-version.py
|
69852d3acc488fb8ccf3b4f613225d51383ef948
|
[] |
no_license
|
m-narayan/smart
|
92f42bf90d7d2b24f61915fac8abab70dd8282bc
|
1a6765deafd8679079b64dcc35f91933d37cf2dd
|
refs/heads/master
| 2016-08-05T17:29:30.847382
| 2013-01-04T04:50:26
| 2013-01-04T04:50:26
| 7,079,786
| 8
| 6
| null | 2015-04-29T08:54:12
| 2012-12-09T14:56:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
# Copyright (C) 2011-2012 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
import os, shutil
FILE_NAME='/etc/apt/sources.list.d/15_ucs-online-version.list'
def preinst(baseConfig, changes):
    """UCR pre-install hook: back up the sources.list snippet.

    Drops any stale backup, then copies the current file (if present)
    to "<name>.old" so postinst can restore it on failure.
    """
    backup = '%s.old' % FILE_NAME
    if os.path.exists(backup):
        os.remove(backup)
    if os.path.exists(FILE_NAME):
        shutil.copyfile('%s' % FILE_NAME, backup)
def postinst(baseConfig, changes):
    """UCR post-install hook: restore the backup if the new file is empty.

    If the freshly written sources.list snippet has at most one line it is
    considered empty: it is removed and the ".old" backup created by
    preinst is copied back.  The backup is always deleted afterwards.
    """
    backup = '%s.old' % FILE_NAME
    if os.path.exists(FILE_NAME):
        # Read via a context manager — the original leaked the file handle
        # (open(...).readlines() with no close).
        with open(FILE_NAME, 'r') as fd:
            res = fd.readlines()
        if len(res) <= 1:
            os.remove(FILE_NAME)
            if os.path.exists(backup):
                shutil.copyfile(backup, '%s' % FILE_NAME)
    if os.path.exists(backup):
        os.remove(backup)
|
[
"kartik@debian.org"
] |
kartik@debian.org
|
ac1f4677532bd69943d43bfac731b473a9f32705
|
41e2cf24f0ff3a11a98bb00e03c598dde35452c4
|
/project/migrations/0009_googleapisetup.py
|
f2a0baa279dd151205113e1a9a0a64bb2a0691f5
|
[] |
no_license
|
anushamokashi/mob
|
f5dbedc729073092f94323feca6d95dee24087a2
|
37bc0eb033bc23d37e9d4fb9bb8b2b456553ff7f
|
refs/heads/master
| 2020-04-24T08:36:56.008212
| 2019-02-21T09:09:04
| 2019-02-21T09:09:04
| 171,810,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-31 10:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated Django 1.11 migration: creates the GoogleAPISetup
    # model (API key + client id per project).  Left byte-identical —
    # editing an applied migration breaks migration-state consistency.

    dependencies = [
        ('project', '0008_project_table_append_by_underscore'),
    ]

    operations = [
        migrations.CreateModel(
            name='GoogleAPISetup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('apikey', models.CharField(blank=True, max_length=200, null=True)),
                ('clientid', models.CharField(blank=True, max_length=200, null=True)),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Project')),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
        ),
    ]
|
[
"anusha.mokashi@gmail.com"
] |
anusha.mokashi@gmail.com
|
ad02f8785f62b23517182467691e772ea5ff368c
|
981fbe20320ce16e5746c3d492545fbd30bcef02
|
/screen_cap/http_screen_cap.py
|
1e3c46dd41dba62f9d329daa7ebf9789613794af
|
[] |
no_license
|
jinjin123/zabbix-api
|
f73e32c3433356c19df623066d457f5d7e0709e6
|
471116d0dcd5074b1047d4065c87e7f32c9aa9ff
|
refs/heads/master
| 2021-01-25T06:45:16.371094
| 2017-07-26T12:23:39
| 2017-07-26T12:23:39
| 93,605,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# task argument to get the mapping grapth
#hostid and graphid its need , 每个host的id都不一样 ,从hostid 往下级拿graphid hostid=xx&graphid=xx&
import json, traceback
import datetime
import cookielib, urllib2,urllib
import time
class ZabbixGraph():
    """Log into the Zabbix web UI and download rendered graph images.

    Python 2 code (cookielib/urllib2, old except syntax) — kept
    byte-identical; comments translated from Chinese.
    """
    def __init__(self,url="http://172.16.102.128:81/index.php",name="admin",password="zabbix"):
        self.url=url
        self.name=name
        self.passwd=password
        # Generate the session cookies at initialisation time by logging in.
        cookiejar = cookielib.CookieJar()
        urlOpener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
        values = {"name":self.name,'password':self.passwd,'autologin':1,"enter":'Sign in'}
        data = urllib.urlencode(values)
        request = urllib2.Request(url, data)
        try:
            # Reuse the cookie-carrying opener for later graph downloads.
            urlOpener.open(request,timeout=10)
            self.urlOpener=urlOpener
        except urllib2.HTTPError, e:
            print e
    # NOTE(review): `values` is a mutable default argument evaluated once at
    # definition time (including its stime timestamp) — callers should pass
    # their own dict, as the __main__ block below does.
    def GetGraph(self,url="http://172.16.102.128:81/chart2.php",values={'width': 800, 'height': 200, 'hostid': '', 'graphid': '', 'stime': time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())), 'period': 3600},image_dir="/home/azureuser"):
        # POST the graph parameters and read back the rendered image bytes.
        data=urllib.urlencode(values)
        request = urllib2.Request(url,data)
        url = self.urlOpener.open(request)
        image = url.read()
        # File name encodes host, graph and start time for later lookup.
        imagename="%s/%s_%s_%s.jpg" % (image_dir, values["hostid"], values["graphid"], values["stime"])
        #imagename="%s/%s_%s.jpg" % (image_dir, values["graphid"], values["stime"])
        f=open(imagename,'wb')
        f.write(image)
        return '1'
if __name__ == "__main__":
#hostid = ['10107','10108','10109','10110','10111','10112']
hostid = ['10107','10108']
#graphidm = ['594','566','566','594','601','608']
graphidm = ['594','566']
graphidd = ['624','643']
#graphidd = ['624','643','','','','','']
graph = ZabbixGraph()
stime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
values = {'width': 800, 'height': 200, 'hostid': '10107', 'graphid': '594', 'stime': stime, 'period': 300}
graph.GetGraph("http://172.16.102.128:81/chart2.php",values,"/root/screen")
#for h in hostid:
# for m in graphidm:
# values = {'width': 800, 'height': 200, 'hostid': h, 'graphid': m, 'stime': stime, 'period': 300}
# graph.GetGraph("http://172.16.102.128:81/chart2.php",values,"/root/screen")
#for d in graphidd:
# values = {'width': 800, 'height': 200, 'hostid': h, 'graphid': d, 'stime': stime, 'period': 300}
# graph.GetGraph("http://172.16.102.128:81/chart2.php",values,"/root/screen")
|
[
"1293813551@qq.com"
] |
1293813551@qq.com
|
bf42f669890aa2afb5de8d642415984eadf63771
|
60a6ba6e5f3faca2b1e17c1e90917efc3cfc561a
|
/aoc2018/day7/day7_part2.py
|
675c2a0599f50e1f486089a078f71bc1a088a2c2
|
[
"MIT"
] |
permissive
|
GetPastTheMonkey/advent-of-code
|
f462f5e2b72d913e39484446ce92a043d455091c
|
7a5ee30dbafaf8ef6f9bf9936e484efd024aa308
|
refs/heads/master
| 2023-01-14T09:45:00.553575
| 2022-12-25T10:59:19
| 2022-12-25T13:00:44
| 160,684,715
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,782
|
py
|
from os.path import join, dirname, realpath
from re import match

# Specify number of workers
worker_count = 5
# Each worker tracks the task letter it is working on (None = idle) and
# the seconds remaining on that task.
workers = [{
    "task": None,
    "remaining": 0
} for _ in range(worker_count)]

# Build the task table for letters A-Z, then load dependencies from input.txt.
tasks = dict()
for i in range(ord("A"), ord("Z")+1):
    tasks[chr(i)] = dict()
    tasks[chr(i)]["requirements"] = []
    tasks[chr(i)]["duration"] = 60 + (i - 64)  # 60 + position of character in alphabet -> A = 60+1, B = 60+2, ...
    tasks[chr(i)]["has_worker"] = False

with open(join(dirname(realpath(__file__)), "input.txt")) as f:
    for line in f:
        # Each line: "Step X must be finished before step Y can begin."
        m = match("^Step (?P<req>[A-Z]) must be finished before step (?P<step>[A-Z]) can begin\.$", line)
        step = m.group("step")
        reqs = m.group("req")
        tasks[step]["requirements"].append(reqs)
def find_empty_tasks(req):
    """Return the alphabetically sorted names of tasks that are ready.

    A task is ready when its requirement list is empty and no worker has
    already claimed it.
    """
    ready = [name for name, info in req.items()
             if not info["requirements"] and not info["has_worker"]]
    return sorted(ready)
def distribute_work(req, w):
    """Assign ready tasks (no pending requirements, unclaimed) to idle workers.

    Mutates both arguments in place and also returns them.  The module-level
    `iterations` counter is read only for the progress message.
    """
    empty_tasks = find_empty_tasks(req)
    if empty_tasks:
        print("[ITERATION {}] - Tasks with empty requirements: {}".format(iterations, empty_tasks))
    for worker in w:
        # If the worker is idle and there is still an empty task, then work on it
        if worker["task"] is None and len(empty_tasks) > 0:
            t = empty_tasks.pop(0)
            worker["task"] = t
            worker["remaining"] = req[t]["duration"]
            req[t]["has_worker"] = True
    return req, w
def do_work(w):
    """Advance the clock one second: every busy worker burns one work unit."""
    busy = (wk for wk in w if wk["task"] is not None)
    for wk in busy:
        wk["remaining"] -= 1
def remove_finished_tasks(req, w):
    """Drop tasks whose worker just reached zero remaining seconds.

    Finished tasks are removed from the task dict, their workers are freed,
    and a fresh task dict is rebuilt with the finished names stripped from
    every remaining task's requirement list.  Returns (new_tasks, workers).
    """
    removed_tasks = []
    # Loop through workers and remove finished tasks
    for worker in w:
        if worker["task"] is not None and worker["remaining"] == 0:
            # Remove task from req dict
            print("[ITERATION {}] - Finished task {}".format(iterations, worker["task"]))
            req.pop(worker["task"])
            removed_tasks.append(worker["task"])
            worker["task"] = None
    # Create new task dict
    new_tasks = dict()
    for key, value in req.items():
        new_tasks[key] = {
            "requirements": [],
            "duration": value["duration"],
            "has_worker": value["has_worker"]
        }
        for r in value["requirements"]:
            # Keep only dependencies that have not just finished.
            if r not in removed_tasks:
                new_tasks[key]["requirements"].append(r)
    return new_tasks, w
# Simulate one second per iteration until every task completes; the final
# iteration count is the part-2 answer (total elapsed seconds).
iterations = 0
while tasks:
    tasks, workers = distribute_work(tasks, workers)
    do_work(workers)
    iterations += 1
    tasks, workers = remove_finished_tasks(tasks, workers)
print("Finished after {} iterations (with {} workers)".format(iterations, worker_count))
|
[
"sven.gruebel@gmx.ch"
] |
sven.gruebel@gmx.ch
|
f5d4cf6f485d762c5643ead19f6f44edcc5d2d96
|
0485a490f466bd1d02eaae96d277888781208c0e
|
/tests/single_instruction_translation_validation/mcsema/register-variants/movb_r8_rh/Output/test-z3.py
|
e85a3ad441dea371dd1ab92ebdf22d518b6ae522
|
[
"LicenseRef-scancode-unknown-license-reference",
"NCSA"
] |
permissive
|
Mthandazo42/validating-binary-decompilation
|
c0e2d54cd79e609bfa35802975bddfa52e646fad
|
c0fcd6f099e38195dcbbac9e8c13a825865c5cb5
|
refs/heads/master
| 2022-11-11T13:18:13.033044
| 2020-06-25T05:49:01
| 2020-06-25T05:49:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,269
|
py
|
#############################################
######## Auto Generated Proof Scripts #######
#############################################
import z3
import sys
# Aggregate verdict shared with solve(): True = all queries unsat (pass),
# False = a counterexample was found, "Unknown" = a solver timeout occurred.
status=True
test_name="UnK"
if(len(sys.argv) > 1):
    test_name = sys.argv[1]
def solve(msg, lvar, xvar, s):
    """Check the mismatch query already asserted on solver `s`.

    sat means the lifted-LLVM definition (lvar) and the x86 reference
    (xvar) can disagree for the flag/register named by `msg`; the model is
    printed and the global `status` flips accordingly.
    """
    global status
    s.set("timeout", 60000)
    res = s.check()
    if(z3.unknown == res):
        # Solver gave up within the 60s budget: record "Unknown".
        print(test_name + "::" + msg + "::unk")
        status = "Unknown"
    if(z3.sat == res):
        if("UNDEF" in xvar.sexpr()):
            # The disagreement involves an explicitly undefined value: tolerated.
            print(test_name + "::" + msg + "::undef-sat")
        else:
            # Genuine counterexample: dump query and model for debugging.
            m = s.model()
            print(test_name + "::" + msg + "::sat")
            print("\n")
            print("query", s)
            print("\n")
            print("model", m)
            print("\n")
            print("xvar =", m.evaluate(xvar))
            print("lvar =", m.evaluate(lvar))
            print("\n")
            status = False
##############################
## X86 specific variables ####
##############################
# Symbolic machine state for the x86 reference semantics ("VX_" prefix).
### GPRs
VX_RAX = z3.BitVec('VX_RAX',64)
VX_RBX = z3.BitVec('VX_RBX',64)
VX_RCX = z3.BitVec('VX_RCX',64)
VX_RDX = z3.BitVec('VX_RDX',64)
VX_RSI = z3.BitVec('VX_RSI',64)
VX_RDI = z3.BitVec('VX_RDI',64)
### Flags
VX_CF = z3.BitVec('VX_CF',1)
VX_PF = z3.BitVec('VX_PF',1)
VX_ZF = z3.BitVec('VX_ZF',1)
VX_SF = z3.BitVec('VX_SF',1)
VX_AF = z3.BitVec('VX_AF',1)
VX_OF = z3.BitVec('VX_OF',1)
### YMM Registers
VX_YMM1 = z3.BitVec('VX_YMM1', 256)
VX_YMM2 = z3.BitVec('VX_YMM2', 256)
## Undef
VX_UNDEF_1 = z3.BitVec('VX_UNDEF_1', 1)
VX_UNDEF_BOOL = z3.Bool('VX_UNDEF_BOOL')
##############################
## X86 specific variables ####
##############################
# Symbolic state for the lifted-LLVM semantics ("VL_" prefix).  Flags are
# byte-sized here, hence the 0/1 range constraints and Extract(0,0,...) below.
### GPRs
VL_RAX = z3.BitVec('VL_RAX',64)
VL_RBX = z3.BitVec('VL_RBX',64)
VL_RCX = z3.BitVec('VL_RCX',64)
VL_RDX = z3.BitVec('VL_RDX',64)
VL_RSI = z3.BitVec('VL_RSI',64)
VL_RDI = z3.BitVec('VL_RDI',64)
### Flags
VL_CF = z3.BitVec('VL_CF',8)
VL_PF = z3.BitVec('VL_PF',8)
VL_ZF = z3.BitVec('VL_ZF',8)
VL_SF = z3.BitVec('VL_SF',8)
VL_AF = z3.BitVec('VL_AF',8)
VL_OF = z3.BitVec('VL_OF',8)
### YMM Registers
VL_YMM1_0 = z3.BitVec('VL_YMM1_0', 64)
VL_YMM1_1 = z3.BitVec('VL_YMM1_1', 64)
VL_YMM1_2 = z3.BitVec('VL_YMM1_2', 64)
VL_YMM1_3 = z3.BitVec('VL_YMM1_3', 64)
VL_YMM2_0 = z3.BitVec('VL_YMM2_0', 64)
VL_YMM2_1 = z3.BitVec('VL_YMM2_1', 64)
VL_YMM2_2 = z3.BitVec('VL_YMM2_2', 64)
VL_YMM2_3 = z3.BitVec('VL_YMM2_3', 64)
##############################
## Proof variables ###########
##############################
# Shared "observation" variables equated against both sides in each query.
V_R = z3.BitVec('V_R',64)
V_F = z3.BitVec('V_F',1)
V_Y = z3.BitVec('V_Y',256)
## Solver instance
s = z3.Solver()
##############################
## Default constraints #######
##############################
# Precondition: both machines start from the same architectural state.
### GPRs
s.add(VX_RAX == VL_RAX)
s.add(VX_RBX == VL_RBX)
s.add(VX_RCX == VL_RCX)
s.add(VX_RDX == VL_RDX)
s.add(VX_RDI == VL_RDI)
s.add(VX_RSI == VL_RSI)
### Flags
s.add(z3.Or(VL_CF == 0, VL_CF == 1))
s.add(z3.Or(VL_ZF == 0, VL_ZF == 1))
s.add(z3.Or(VL_PF == 0, VL_PF == 1))
s.add(z3.Or(VL_SF == 0, VL_SF == 1))
s.add(z3.Or(VL_AF == 0, VL_AF == 1))
s.add(z3.Or(VL_OF == 0, VL_OF == 1))
s.add(z3.Extract(0,0, VL_CF) == VX_CF)
s.add(z3.Extract(0,0, VL_SF) == VX_SF)
s.add(z3.Extract(0,0, VL_ZF) == VX_ZF)
s.add(z3.Extract(0,0, VL_PF) == VX_PF)
s.add(z3.Extract(0,0, VL_AF) == VX_AF)
s.add(z3.Extract(0,0, VL_OF) == VX_OF)
### Ymms
s.add(z3.Concat(VL_YMM1_3, VL_YMM1_2, VL_YMM1_1, VL_YMM1_0) == VX_YMM1)
s.add(z3.Concat(VL_YMM2_3, VL_YMM2_2, VL_YMM2_1, VL_YMM2_0) == VX_YMM2)
# Each section below checks one flag/register of the instruction under test:
# it asserts lvar != xvar under push/pop; unsat means the two semantics agree.
## =******= AF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, VL_AF)))
xvar = (V_F == VX_AF)
s.add(lvar != xvar)
solve("AF", lvar, xvar, s)
s.pop()
## =******= CF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, VL_CF)))
xvar = (V_F == VX_CF)
s.add(lvar != xvar)
solve("CF", lvar, xvar, s)
s.pop()
## =******= OF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, VL_OF)))
xvar = (V_F == VX_OF)
s.add(lvar != xvar)
solve("OF", lvar, xvar, s)
s.pop()
## =******= PF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, VL_PF)))
xvar = (V_F == VX_PF)
s.add(lvar != xvar)
solve("PF", lvar, xvar, s)
s.pop()
## =******= RAX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RAX), z3.Extract(55, 48, VL_RAX), z3.Extract(47, 40, VL_RAX), z3.Extract(39, 32, VL_RAX), z3.Extract(31, 24, VL_RAX), z3.Extract(23, 16, VL_RAX), z3.Extract(15, 8, VL_RAX), z3.Extract(7, 0, VL_RAX)))
xvar = (V_R == VX_RAX)
s.add(lvar != xvar)
solve("RAX", lvar, xvar, s)
s.pop()
## =******= RBX =******=
# RBX is the destination: its low byte receives AH (bits 15:8 of RAX).
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RBX), z3.Extract(55, 48, VL_RBX), z3.Extract(47, 40, VL_RBX), z3.Extract(39, 32, VL_RBX), z3.Extract(31, 24, VL_RBX), z3.Extract(23, 16, VL_RBX), z3.Extract(15, 8, VL_RBX), z3.Extract(7, 0, (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(15, 8, VL_RAX)) & z3.BitVecVal(256 - 1, 64)))))
xvar = (V_R == z3.Concat(z3.Extract(63, 8, VX_RBX), z3.Extract(15, 8, VX_RAX)))
s.add(lvar != xvar)
solve("RBX", lvar, xvar, s)
s.pop()
## =******= RCX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RCX), z3.Extract(55, 48, VL_RCX), z3.Extract(47, 40, VL_RCX), z3.Extract(39, 32, VL_RCX), z3.Extract(31, 24, VL_RCX), z3.Extract(23, 16, VL_RCX), z3.Extract(15, 8, VL_RCX), z3.Extract(7, 0, VL_RCX)))
xvar = (V_R == VX_RCX)
s.add(lvar != xvar)
solve("RCX", lvar, xvar, s)
s.pop()
## =******= RDX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RDX), z3.Extract(55, 48, VL_RDX), z3.Extract(47, 40, VL_RDX), z3.Extract(39, 32, VL_RDX), z3.Extract(31, 24, VL_RDX), z3.Extract(23, 16, VL_RDX), z3.Extract(15, 8, VL_RDX), z3.Extract(7, 0, VL_RDX)))
xvar = (V_R == VX_RDX)
s.add(lvar != xvar)
solve("RDX", lvar, xvar, s)
s.pop()
## =******= SF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, VL_SF)))
xvar = (V_F == VX_SF)
s.add(lvar != xvar)
solve("SF", lvar, xvar, s)
s.pop()
## =******= ZF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, VL_ZF)))
xvar = (V_F == VX_ZF)
s.add(lvar != xvar)
solve("ZF", lvar, xvar, s)
s.pop()
# Final verdict.  NOTE(review): the colour strings look like ANSI sequences
# whose leading ESC (\x1b) byte was lost during decompilation — confirm
# against the original source before relying on coloured output.
if(status == True):
    print('[6;30;42m' + 'Test-Pass: ' + '[0m' + test_name)
else:
    if(status == False):
        print('[0;30;41m' + 'Test-Fail: ' + '[0m' + test_name)
    else:
        print('[6;30;47m' + 'Test-Unk: ' + '[0m' + test_name)
|
[
"sdasgup3@illinois.edu"
] |
sdasgup3@illinois.edu
|
0ccb62474a0317f86dfe9138ec3b8c5878be2948
|
fb00b570251ba52df467e4cc030a30e778f8a970
|
/Atividade 02 - semana 04/questão4_semana4_atividade02_runcodes.py
|
a3048ea0063d9e885ce77e9effdf8b688eb5e1ef
|
[] |
no_license
|
SirLeonardoFerreira/Atividades-ifpi
|
7379f9df4640fd1ee3623d80e4341f495e855895
|
e366ee3f801dc9a1876c7399a2eefd37a03d0a55
|
refs/heads/master
| 2023-01-05T04:03:30.774277
| 2020-11-02T00:56:10
| 2020-11-02T00:56:10
| 287,967,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
def signo(dia, mes):
    """Return the zodiac sign (Brazilian Portuguese name) for a day/month.

    Returns None when the pair falls in no sign's range (e.g. the invalid
    date 30/02), matching the original if/elif chain's fall-through.
    """
    # Last valid day accepted for the first month of each sign's range
    # (February uses 29, as in the original bounds).
    ultimo_dia = {1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30,
                  7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    # (name, first month, first day, second month, last day): each sign
    # covers the tail of one month and the head of the next.
    faixas = [
        ('Áries', 3, 21, 4, 19),
        ('Touro', 4, 20, 5, 20),
        ('Gêmeos', 5, 21, 6, 21),
        ('Câncer', 6, 22, 7, 22),
        ('Leão', 7, 23, 8, 22),
        ('Virgem', 8, 23, 9, 22),
        ('Libra', 9, 23, 10, 22),
        ('Escorpião', 10, 23, 11, 21),
        ('Sagitário', 11, 22, 12, 21),
        ('Capricórnio', 12, 22, 1, 19),
        ('Aquário', 1, 20, 2, 18),
        ('Peixes', 2, 19, 3, 20),
    ]
    for nome, mes_ini, dia_ini, mes_fim, dia_fim in faixas:
        comeco = mes == mes_ini and dia_ini <= dia <= ultimo_dia[mes_ini]
        fim = mes == mes_fim and 1 <= dia <= dia_fim
        if comeco or fim:
            return nome
def main():
    """Read day and month of birth from stdin and print the zodiac sign."""
    dia = int(input())
    mes = int(input())
    print(f'{signo(dia, mes)}')


if __name__=='__main__':
    main()
|
[
"lleoalves02@gmail.com"
] |
lleoalves02@gmail.com
|
6adaf26c83041f163d6f9002d77e24deeb133c0f
|
30ea9abff7438755bfc8a483ae843152d3e49b9b
|
/力扣习题/118杨辉三角/pascalstriangle.py
|
28285769718b8d071b795a07cd59ee1e588a6057
|
[
"MIT"
] |
permissive
|
houcy/AlgorithmLearning
|
2dee945a4f9fefc981020c365664bcd65e5994c4
|
92e3dd6ae8d27cd8fb1a3a7035b2f7e0eb86a7dc
|
refs/heads/master
| 2022-12-25T19:55:51.323740
| 2020-10-09T04:24:11
| 2020-10-09T04:24:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,049
|
py
|
class Solution:
    """Iterative (non-recursive) construction of Pascal's triangle."""

    def generate(self, numRows: int) -> [[]]:
        """Return the first numRows rows; empty list for numRows <= 0."""
        triangle = []
        if numRows == 1:
            triangle.append([1])
        elif numRows > 1:
            for row_idx in range(numRows):
                prev = triangle[-1] if triangle else []
                # Edges are 1; interior cells sum the two parents above.
                row = [1 if j == 0 or j == row_idx else prev[j - 1] + prev[j]
                       for j in range(row_idx + 1)]
                triangle.append(row)
        return triangle
# Quick manual smoke test of the iterative solution.
s = Solution()
print(s.generate(5))
class Solution2:
    """Recursive construction: row n is derived from the (n-1)-row triangle."""

    def generate(self, numRows: int) -> [[]]:
        """Return the first numRows rows of Pascal's triangle."""
        if numRows == 0:
            return []
        if numRows == 1:
            return [[1]]
        smaller = self.generate(numRows - 1)
        last = smaller[-1]
        # New row: 1, pairwise sums of the previous row, 1.
        new_row = [1] + [last[i - 1] + last[i] for i in range(1, numRows - 1)] + [1]
        smaller.append(new_row)
        return smaller
class Solution3:
    """Compute Pascal's triangle directly instead of a hard-coded table.

    The original shipped a 30-row literal table and returned res[0:numRows],
    which silently truncated the result for numRows > 30 and returned
    nonsense slices for negative input.  Building rows iteratively yields
    exactly the same values for 0 <= numRows <= 30 and generalizes beyond.
    """

    def generate(self, numRows: int) -> [[]]:
        """Return the first numRows rows of Pascal's triangle
        (empty list for numRows <= 0)."""
        rows = []
        current = [1]
        for _ in range(numRows):
            rows.append(current)
            # Next row: 1, pairwise sums of the current row, 1.
            current = [1] + [current[j] + current[j + 1]
                             for j in range(len(current) - 1)] + [1]
        return rows
|
[
"ab2defg145@gmail.com"
] |
ab2defg145@gmail.com
|
825accd3872929d9287bb3b4c66b0585d16507fe
|
350db570521d3fc43f07df645addb9d6e648c17e
|
/1299_Replace_Elements_with_Greatest_Element_on_Right_Side/solution.py
|
c1d77900854ca9a59cc3073bb3f87162f7eb586d
|
[] |
no_license
|
benjaminhuanghuang/ben-leetcode
|
2efcc9185459a1dd881c6e2ded96c42c5715560a
|
a2cd0dc5e098080df87c4fb57d16877d21ca47a3
|
refs/heads/master
| 2022-12-10T02:30:06.744566
| 2022-11-27T04:06:52
| 2022-11-27T04:06:52
| 236,252,145
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
'''
1299. Replace Elements with Greatest Element on Right Side
Level: Easy
https://leetcode.com/problems/replace-elements-with-greatest-element-on-right-side
'''
'''
Solution:
'''
class Solution:
def replaceElements(self, arr: List[int]) -> List[int]:
|
[
"bhuang@rms.com"
] |
bhuang@rms.com
|
ca5d2f735aaee931762726e44f8ffc69d56dab76
|
ddd35c693194aefb9c009fe6b88c52de7fa7c444
|
/Live 10.1.18/ATOM/channel_strip.py
|
fbd60c5b943b861e81fa7cbe0be8417f4de3f5ce
|
[] |
no_license
|
notelba/midi-remote-scripts
|
819372d9c22573877c7912091bd8359fdd42585d
|
e3ec6846470eed7da8a4d4f78562ed49dc00727b
|
refs/heads/main
| 2022-07-30T00:18:33.296376
| 2020-10-04T00:00:12
| 2020-10-04T00:00:12
| 301,003,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\ATOM\channel_strip.py
# Compiled at: 2020-05-05 13:23:28
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import liveobj_valid
from ableton.v2.control_surface.components import ChannelStripComponent as ChannelStripComponentBase
class ChannelStripComponent(ChannelStripComponentBase):
    """Channel strip that colours its select button by track-selection state."""

    empty_color = b'Mixer.EmptyTrack'

    def _update_select_button(self):
        track = self._track
        is_selected = liveobj_valid(track) and self.song.view.selected_track == track
        self.select_button.color = b'Mixer.Selected' if is_selected else b'DefaultButton.Off'
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/ATOM/channel_strip.pyc
|
[
"notelba@example.com"
] |
notelba@example.com
|
13b235a66727792736ec940ae4bc3cc630a0c1fb
|
d44215864e30ad8039a1a294875e4222e3d23ebd
|
/build/geometry-hydro-devel/tf/catkin_generated/pkg.installspace.context.pc.py
|
dbd62ce2cd49cece7fca3f4fcc8794848494ff9a
|
[] |
no_license
|
prathyusha-shine/abhiyan1.0
|
5c3eebfbbacb8b364180b9c2bd377c73cf29e693
|
bf9be6462c132465ddbf8c20b1e9a4e1eabd596e
|
refs/heads/master
| 2020-12-31T01:23:32.911145
| 2015-05-31T06:19:16
| 2015-05-31T06:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# pkg-config context for the ROS 'tf' package (install space).
# NOTE(review): auto-generated by catkin — regenerate rather than hand-edit.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sudha/catkin_ws/install/include".split(';') if "/home/sudha/catkin_ws/install/include" != "" else []
# Semicolon-separated catkin run dependencies, converted to a space list.
PROJECT_CATKIN_DEPENDS = "geometry_msgs;message_filters;message_runtime;roscpp;sensor_msgs;std_msgs;tf2_ros;rosconsole".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltf".split(';') if "-ltf" != "" else []
PROJECT_NAME = "tf"
PROJECT_SPACE_DIR = "/home/sudha/catkin_ws/install"
PROJECT_VERSION = "1.10.8"
|
[
"sudha@sudha.(none)"
] |
sudha@sudha.(none)
|
6d1ebf41ab4811a8adc2865d675e4b20db67c5ee
|
bc547e7d9e4b2c1e49edc2daaa735c9afb87f5ae
|
/test/test_all_fault_handlers.py
|
49068eb0020cb52f7c87f865ccede508daaabba8
|
[
"MIT"
] |
permissive
|
farisachugthai/dynamic_ipython
|
f7ed092ff23b785fc8c545390c581338a64b9bda
|
7572a01f09998812830379644c45af4df67a3e45
|
refs/heads/master
| 2022-11-05T11:48:48.344585
| 2021-08-28T04:25:05
| 2021-08-28T04:25:05
| 178,786,145
| 7
| 0
|
MIT
| 2022-10-25T10:16:39
| 2019-04-01T04:35:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
#!/usr/bin/env python3
import os
import shutil
import tempfile
import unittest
from os.path import abspath, realpath, isfile, exists
import pytest
from IPython.testing.globalipapp import get_ipython
from default_profile.startup.all_fault_handlers import tempdir, in_tempdir, in_dir
def remove_tmpdir(dir):
    """Best-effort recursive removal of *dir*.

    Missing or malformed paths are ignored; a PermissionError is re-raised
    so genuine access problems surface.  In the original, PermissionError
    (a subclass of OSError) was swallowed by the broad first clause, which
    made the intended re-raise branch unreachable — the order is fixed here.
    """
    try:
        shutil.rmtree(dir)
    except PermissionError:
        raise
    except (NotADirectoryError, FileNotFoundError, OSError):
        # NotADirectoryError/FileNotFoundError are OSError subclasses; kept
        # explicitly to document the expected failure modes.
        pass
@pytest.fixture
def cwd():
    # Absolute path of the process's current working directory at call time.
    return os.path.abspath(os.path.curdir)
class FixturesTest(unittest.TestCase):
    """unittest flavour of pytest's tmpdir: each test gets a fresh temporary
    directory that is removed again via addCleanup."""

    def setUp(self):
        # unittest's version of the tmpdir fixture
        self.tmpdir = tempfile.mkdtemp()
        self.addCleanup(remove_tmpdir, self.tmpdir)

    # TODO(review): restore a real test here.  The original sketch wanted to
    # assert that %rehashx does not raise; assertRaises cannot express
    # "does not raise" — simply invoking the magic in the test body is enough.
    # def test_rehashx_does_not_raise(self):
    #     get_ipython().run_line_magic('rehashx')
def test_tempdir():
    # tempdir() must remove the directory (and its contents) on exit.
    # NOTE(review): the written text is a literal backslash-n ('a string\\n'),
    # not a newline — possibly an escaping artifact; preserved as-is.
    with tempdir() as tmpdir:
        fname = os.path.join(tmpdir, 'example_file.txt')
        with open(fname, 'wt') as fobj:
            fobj.write('a string\\n')
    assert not exists(tmpdir)
def test_in_tempdir(cwd):
    # in_tempdir() both creates a temp dir and chdirs into it, so relative
    # writes land inside; the directory is removed on exit.
    with in_tempdir() as tmpdir:
        with open('test.txt', 'wt') as f:
            f.write('some text')
        assert isfile('test.txt')
        assert isfile(os.path.join(tmpdir, 'test.txt'))
    assert not exists(tmpdir)
def test_given_directory(cwd):
    # Test InGivenDirectory
    # in_dir(cwd) must yield the directory it was given, absolutized.
    with in_dir(cwd) as tmpdir:
        assert tmpdir == abspath(cwd)
    with in_dir(cwd) as tmpdir:
        assert tmpdir == cwd
if __name__ == "__main__":
    # Allow running this module directly, without pytest.
    unittest.main()
|
[
"farischugthai@gmail.com"
] |
farischugthai@gmail.com
|
aba1fe1222e36f72353fd0c6c5a21047cc2cedee
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03212/s618244450.py
|
185edf86b67bdc8518f2d9341edb2c2cdcd3ecfc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
# ABC114 C "755": count numbers <= N whose decimal digits are drawn only
# from {3, 5, 7} and that use all three digits at least once.
N = input()
L = len(N)
from itertools import product
check_num = [3, 5, 7]
check = []
limit = int(N)
for length in range(1, L + 1):
    # Enumerate every length-`length` string over the digits 3/5/7.
    for digits in product('357', repeat=length):
        candidate = ''.join(digits)
        if len(set(candidate)) == 3 and int(candidate) <= limit:
            check.append(int(candidate))
print(len(check))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
71687720ff526965a20c77c9db597830ce3187b5
|
714058081fe435ed89b94cfa94587338e64672cb
|
/marqeta/response_models/digital_wallet_token_hash.py
|
44fe321135afec4b24ddcf5ac0ed83bccebdd7f4
|
[
"MIT"
] |
permissive
|
andyw8/marqeta-python
|
bc194944c08e8c8327a8a20bac3dc615b2e2a95f
|
23e0a66a5d7b20f3f992e44ae22b33a0eebdbce2
|
refs/heads/master
| 2020-05-20T14:25:39.398668
| 2019-04-01T23:53:55
| 2019-04-01T23:53:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
from datetime import datetime, date
import json
class DigitalWalletTokenHash(object):
    """Thin wrapper around the raw API payload for a digital-wallet token hash."""

    def __init__(self, json_response):
        self.json_response = json_response

    @staticmethod
    def json_serial(o):
        # json.dumps fallback: stringify datetime/date values; any other
        # unserializable type returns None (json then raises for it).
        if isinstance(o, (datetime, date)):
            return str(o)

    @property
    def token(self):
        # 'token' key of the payload, or None when absent.
        return self.json_response.get('token', None)

    def __str__(self):
        return json.dumps(self.json_response, default=self.json_serial)

    def __repr__(self):
        return '<Marqeta.response_models.digital_wallet_token_hash.DigitalWalletTokenHash>' + self.__str__()
|
[
"amaratkere@marqeta.com"
] |
amaratkere@marqeta.com
|
c167c5819bfa452fa8fdba057ff142fbdbde00fe
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/090_logging/examples/nuke/34-python_in_production-logging_to_a_qt_widget/logger.py
|
c0e05b78da6905f18952e733200c169b31a72bf1
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,774
|
py
|
import logging
import sys
class Logger(object):
    """Lazily-configured application logger exposed through classmethods.

    The underlying logging.Logger is created on first use, given a stderr
    stream handler, and cached on the class so every call site shares the
    same configured instance.
    """

    LOGGER_NAME = "Zurbrigg"

    FORMAT_DEFAULT = "[%(name)s][%(levelname)s] %(message)s"
    LEVEL_DEFAULT = logging.DEBUG
    PROPAGATE_DEFAULT = True

    _logger_obj = None

    @classmethod
    def logger_obj(cls):
        """Return the shared logger, creating and configuring it on first use."""
        if not cls._logger_obj:
            if cls.logger_exists():
                # Something else already registered the logger; just fetch it
                # without re-adding handlers.
                cls._logger_obj = logging.getLogger(cls.LOGGER_NAME)
            else:
                cls._logger_obj = logging.getLogger(cls.LOGGER_NAME)
                cls._logger_obj.setLevel(cls.LEVEL_DEFAULT)
                cls._logger_obj.propagate = cls.PROPAGATE_DEFAULT
                handler = logging.StreamHandler(sys.stderr)
                handler.setFormatter(logging.Formatter(cls.FORMAT_DEFAULT))
                cls._logger_obj.addHandler(handler)
        return cls._logger_obj

    @classmethod
    def logger_exists(cls):
        """True when a logger with our name is already registered globally."""
        return cls.LOGGER_NAME in logging.Logger.manager.loggerDict.keys()

    @classmethod
    def set_level(cls, level):
        cls.logger_obj().setLevel(level)

    @classmethod
    def set_propagate(cls, propagate):
        cls.logger_obj().propagate = propagate

    @classmethod
    def debug(cls, msg, *args, **kwargs):
        cls.logger_obj().debug(msg, *args, **kwargs)

    @classmethod
    def info(cls, msg, *args, **kwargs):
        cls.logger_obj().info(msg, *args, **kwargs)

    @classmethod
    def warning(cls, msg, *args, **kwargs):
        cls.logger_obj().warning(msg, *args, **kwargs)

    @classmethod
    def error(cls, msg, *args, **kwargs):
        cls.logger_obj().error(msg, *args, **kwargs)

    @classmethod
    def critical(cls, msg, *args, **kwargs):
        cls.logger_obj().critical(msg, *args, **kwargs)

    @classmethod
    def log(cls, level, msg, *args, **kwargs):
        cls.logger_obj().log(level, msg, *args, **kwargs)

    @classmethod
    def exception(cls, msg, *args, **kwargs):
        cls.logger_obj().exception(msg, *args, **kwargs)

    @classmethod
    def write_to_file(cls, path, level=logging.WARNING):
        """Additionally mirror records at *level* or above into the file *path*."""
        file_handler = logging.FileHandler(path)
        file_handler.setLevel(level)
        file_handler.setFormatter(logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s"))
        cls.logger_obj().addHandler(file_handler)
if __name__ == "__main__":
    # Manual smoke test: emit one record per severity, without propagation.
    Logger.set_propagate(False)
    for emit, text in (
        (Logger.debug, "debug message"),
        (Logger.info, "info message"),
        (Logger.warning, "warning message"),
        (Logger.error, "error message"),
        (Logger.critical, "critical message"),
    ):
        emit(text)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
907e51e3e9abb9e4b37491c1122a2c555afe1fcc
|
42674d7355d852e6ec7071830bb87d781ab63ad3
|
/bitmovin/resources/models/manifests/dash/audio_adaptation_set.py
|
71e54986b06f54a233eb3ec0495f83cff6d90a84
|
[
"Unlicense"
] |
permissive
|
bitmovin/bitmovin-python
|
57b1eb5deb7e38f3079e0ded546ec762753c3132
|
d183718d640117dd75141da261901dc2f60433b0
|
refs/heads/master
| 2023-07-11T02:40:59.277881
| 2020-01-28T14:49:15
| 2020-01-28T14:49:15
| 72,857,798
| 46
| 27
|
Unlicense
| 2019-12-17T13:59:51
| 2016-11-04T15:01:56
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
from .abstract_adaptation_set import AbstractAdaptationSet
class AudioAdaptationSet(AbstractAdaptationSet):
    """DASH audio adaptation set: an AbstractAdaptationSet plus a language tag."""

    def __init__(self, lang, id_=None, custom_data=None):
        super().__init__(id_=id_, custom_data=custom_data)
        self.lang = lang

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build an AudioAdaptationSet from an API JSON payload.

        The base-class parser extracts id/customData; 'lang' is required.
        """
        base = AbstractAdaptationSet.parse_from_json_object(json_object=json_object)
        return AudioAdaptationSet(id_=base.id,
                                  custom_data=base.customData,
                                  lang=json_object['lang'])
|
[
"dominic.miglar@netunix.at"
] |
dominic.miglar@netunix.at
|
3712937801b4655d2f06e615f42f6119be1d0be2
|
d9e5f868392cc846a14577e2578332dd389766a5
|
/ex13.py
|
2a4652a2c2319f92b92f4fdfda224686a6f5811d
|
[] |
no_license
|
quanlidavid/Learn_Python_the_Hard_Way
|
8d8d9c9906d1e6b0de1a1dae78fbf4fd150c466c
|
bc591552efbeb2db588c831bf5280cbe21e11246
|
refs/heads/master
| 2021-05-16T11:18:13.171264
| 2017-09-27T05:56:20
| 2017-09-27T05:56:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
from sys import argv

# The script name plus exactly three positional arguments are expected;
# unpacking raises ValueError when the argument count differs.
script, first, second, third = argv

print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third)

# Sequence unpacking works for plain lists as well.
letters = ['aa', 'bb']
a, b = letters
print(a, b)
|
[
"quanlidavid@gmail.com"
] |
quanlidavid@gmail.com
|
e27cf93f24bc53f6f16fd551ed429b1aca98d4d2
|
480bee2fee71fa5f91fcece256918795adfb3eda
|
/detector/model.py
|
7ebca4a47e922b335504cca41e45677a4865c1e2
|
[] |
no_license
|
favyen/skyquery
|
f71d0095681660e4bce5324ae866371fe51e9e3a
|
dce2639314aaa06cba0d56aab1f7794744c22090
|
refs/heads/master
| 2023-08-22T17:48:08.697538
| 2021-09-27T02:14:52
| 2021-09-27T02:14:52
| 412,963,924
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,272
|
py
|
import numpy
import tensorflow as tf
import os
import os.path
import random
import math
import time
from PIL import Image
# Graph hyper-parameters.  NOTE(review): BATCH_SIZE appears unused here —
# the batch dimension is dynamic (None) in every placeholder; confirm.
BATCH_SIZE = 4
KERNEL_SIZE = 3
class Model:
    """TF1 fully-convolutional detector.

    Encodes a size[0] x size[1] x input_channels image stack down 16x and
    decodes back up to a quarter-resolution single-channel sigmoid heatmap,
    trained with masked per-pixel sigmoid cross-entropy.
    """
    def _conv_layer(self, name, input_var, stride, in_channels, out_channels, options = {}):
        """One (optionally transposed) conv block: conv + optional
        batch-norm, dropout and activation.  `options` is only read, so the
        mutable default dict is harmless here."""
        activation = options.get('activation', 'relu')
        dropout = options.get('dropout', None)
        padding = options.get('padding', 'SAME')
        batchnorm = options.get('batchnorm', False)
        transpose = options.get('transpose', False)
        with tf.variable_scope(name) as scope:
            if not transpose:
                filter_shape = [KERNEL_SIZE, KERNEL_SIZE, in_channels, out_channels]
            else:
                # conv2d_transpose expects filters as [h, w, out_ch, in_ch].
                filter_shape = [KERNEL_SIZE, KERNEL_SIZE, out_channels, in_channels]
            kernel = tf.get_variable(
                'weights',
                shape=filter_shape,
                # He-style initialization scaled by fan-in.
                initializer=tf.truncated_normal_initializer(stddev=math.sqrt(2.0 / KERNEL_SIZE / KERNEL_SIZE / in_channels)),
                dtype=tf.float32
            )
            biases = tf.get_variable(
                'biases',
                shape=[out_channels],
                initializer=tf.constant_initializer(0.0),
                dtype=tf.float32
            )
            if not transpose:
                output = tf.nn.bias_add(
                    tf.nn.conv2d(
                        input_var,
                        kernel,
                        [1, stride, stride, 1],
                        padding=padding
                    ),
                    biases
                )
            else:
                # Output spatial size derived from the input's second dim —
                # assumes square feature maps (height == width).
                batch = tf.shape(input_var)[0]
                side = tf.shape(input_var)[1]
                output = tf.nn.bias_add(
                    tf.nn.conv2d_transpose(
                        input_var,
                        kernel,
                        [batch, side * stride, side * stride, out_channels],
                        [1, stride, stride, 1],
                        padding=padding
                    ),
                    biases
                )
            if batchnorm:
                output = tf.contrib.layers.batch_norm(output, center=True, scale=True, is_training=self.is_training, decay=0.99)
            if dropout is not None:
                output = tf.nn.dropout(output, keep_prob=1-dropout)
            if activation == 'relu':
                return tf.nn.relu(output, name=scope.name)
            elif activation == 'sigmoid':
                return tf.nn.sigmoid(output, name=scope.name)
            elif activation == 'none':
                return output
            else:
                raise Exception('invalid activation {} specified'.format(activation))
    def _fc_layer(self, name, input_var, input_size, output_size, options = {}):
        """Fully-connected block with the same option handling as _conv_layer.
        NOTE(review): not referenced by __init__ below — possibly dead code."""
        activation = options.get('activation', 'relu')
        dropout = options.get('dropout', None)
        batchnorm = options.get('batchnorm', False)
        with tf.variable_scope(name) as scope:
            weights = tf.get_variable(
                'weights',
                shape=[input_size, output_size],
                initializer=tf.truncated_normal_initializer(stddev=math.sqrt(2.0 / input_size)),
                dtype=tf.float32
            )
            biases = tf.get_variable(
                'biases',
                shape=[output_size],
                initializer=tf.constant_initializer(0.0),
                dtype=tf.float32
            )
            output = tf.matmul(input_var, weights) + biases
            if batchnorm:
                output = tf.contrib.layers.batch_norm(output, center=True, scale=True, is_training=self.is_training, decay=0.99)
            if dropout is not None:
                output = tf.nn.dropout(output, keep_prob=1-dropout)
            if activation == 'relu':
                return tf.nn.relu(output, name=scope.name)
            elif activation == 'sigmoid':
                return tf.nn.sigmoid(output, name=scope.name)
            elif activation == 'none':
                return output
            else:
                raise Exception('invalid activation {} specified'.format(activation))
    def __init__(self, bn=False, size=(512, 512), input_channels=6):
        """Build the entire graph: placeholders, conv stack, loss, optimizer."""
        tf.reset_default_graph()
        self.is_training = tf.placeholder(tf.bool)
        # uint8 input images, normalized to [0, 1] floats below.
        self.inputs = tf.placeholder(tf.uint8, [None, size[0], size[1], input_channels])
        self.float_inputs = tf.cast(self.inputs, tf.float32)/255.0# + tf.random.normal(tf.shape(self.inputs), stddev=0.04)*tf.cast(self.is_training, tf.float32)
        # Targets/masks at quarter resolution.  NOTE(review): size[0]/4 is
        # float division under Python 3 — likely intended size[0]//4;
        # confirm TF accepts the float dimension.
        self.targets = tf.placeholder(tf.float32, [None, size[0]/4, size[1]/4])
        self.masks = tf.placeholder(tf.float32, [None, size[0]/4, size[1]/4])
        self.learning_rate = tf.placeholder(tf.float32)
        # layers
        self.layer1 = self._conv_layer('layer1', self.float_inputs, 2, input_channels, 32, {'batchnorm': False}) # -> 256x256x32
        self.layer2 = self._conv_layer('layer2', self.layer1, 2, 32, 64, {'batchnorm': bn}) # -> 128x128x64
        self.layer3 = self._conv_layer('layer3', self.layer2, 2, 64, 64, {'batchnorm': bn}) # -> 64x64x64
        self.layer4 = self._conv_layer('layer4', self.layer3, 2, 64, 64, {'batchnorm': bn}) # -> 32x32x64
        self.layer5 = self._conv_layer('layer5', self.layer4, 1, 64, 64, {'batchnorm': bn}) # -> 32x32x64
        self.layer6 = self._conv_layer('layer6', self.layer5, 2, 64, 64, {'batchnorm': bn, 'transpose': True}) # -> 64x64x64
        self.layer7 = self._conv_layer('layer7', self.layer6, 2, 64, 64, {'batchnorm': bn, 'transpose': True}) # -> 128x128x64
        #self.layer7 = tf.concat([self.layer2, tf.image.resize(self.layer5, [128, 128], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)], axis=3)
        self.pre_outputs = self._conv_layer('pre_outputs', self.layer7, 1, 64, 1, {'activation': 'none', 'batchnorm': False})[:, :, :, 0] # -> 128x128x1
        self.outputs = tf.nn.sigmoid(self.pre_outputs)
        # Masked sigmoid cross-entropy; masks zero out don't-care pixels.
        self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.targets, logits=self.pre_outputs) * self.masks)
        # Run batch-norm moving-average updates together with the train op.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        self.init_op = tf.initialize_all_variables()
        self.saver = tf.train.Saver(max_to_keep=None)
|
[
"fbastani@perennate.com"
] |
fbastani@perennate.com
|
fe6aa27d544a7bc06532e7cb5bfad0801c9b1eba
|
8174d11add088a2413d5a7fdf8233059c3876f52
|
/docs/examples/pool.py
|
9265af58ef12bca9806eb7e1896aa4e7797bc85c
|
[
"MIT"
] |
permissive
|
AraHaan/aioredis
|
903eaaefb243c8bc8d70c9178baf721446c9cc7f
|
19be499015a8cf32580e937cbfd711fd48489eca
|
refs/heads/master
| 2023-03-17T03:16:46.281210
| 2022-02-22T14:33:33
| 2022-02-22T14:33:33
| 82,424,636
| 1
| 0
|
MIT
| 2023-03-07T15:57:29
| 2017-02-19T00:20:56
|
Python
|
UTF-8
|
Python
| false
| false
| 648
|
py
|
import asyncio
import aioredis
async def main():
    """SET/GET round trip with a client created straight from a URL.

    max_connections bounds the connection pool from_url creates internally.
    """
    redis = aioredis.from_url("redis://localhost", max_connections=10)
    await redis.execute_command("set", "my-key", "value")
    val = await redis.execute_command("get", "my-key")
    print("raw value:", val)


async def main_pool():
    """Same round trip, but with an explicitly managed ConnectionPool."""
    pool = aioredis.ConnectionPool.from_url("redis://localhost", max_connections=10)
    redis = aioredis.Redis(connection_pool=pool)
    await redis.execute_command("set", "my-key", "value")
    val = await redis.execute_command("get", "my-key")
    print("raw value:", val)


if __name__ == "__main__":
    # Run both variants back to back; each asyncio.run spins its own loop.
    asyncio.run(main())
    asyncio.run(main_pool())
|
[
"sean_stewart@me.com"
] |
sean_stewart@me.com
|
4943e2a33bb554208d68eb6a684117fda0462433
|
44c372cd25a2496979fa29a1dc9131c54487d945
|
/data/zh50w/zh50w_process.py
|
0c551f731118ff55f6d1cef12a2e33090afd341d
|
[
"MIT"
] |
permissive
|
zhongerqiandan/OpenDialog
|
4ff4e65d0ade1efdd3029475634ae1cf38c7bdd3
|
f478b2a912c8c742da5ced510ac40da59217ddb3
|
refs/heads/master
| 2023-01-22T09:07:54.385604
| 2020-12-04T02:00:17
| 2020-12-04T02:00:17
| 318,419,052
| 0
| 1
|
MIT
| 2020-12-04T05:58:37
| 2020-12-04T05:58:37
| null |
UTF-8
|
Python
| false
| false
| 5,482
|
py
|
import csv
import random
from tqdm import tqdm
import ipdb
import sys
import pickle
sys.path.append('..')
from utils import read_stop_words
from collections import Counter
from gensim.summarization import bm25
from elasticsearch import Elasticsearch
'''
TODO
1. adding the reesponses into elasticsearch for q-r match
'''
class ESUtils:
    """Helper for creating and filling the Elasticsearch retrieval index."""

    def __init__(self, index_name, create_index=False):
        # NOTE(review): assumes an ES node at the default local address.
        self.es = Elasticsearch()
        self.index = index_name
        if create_index:
            # Chinese-aware analyzer; requires the ik plugin on the ES node.
            mapping = {
                'properties': {
                    'context': {
                        'type': 'text',
                        'analyzer': 'ik_max_word',
                        'search_analyzer': 'ik_max_word'
                    }
                }
            }
            # Recreate the index from scratch, then attach the mapping.
            if self.es.indices.exists(index=self.index):
                self.es.indices.delete(index=self.index)
            rest = self.es.indices.create(index=self.index)
            print(rest)
            rest = self.es.indices.put_mapping(body=mapping, index=self.index)
            print(rest)

    def insert_pairs(self, pairs):
        """Index (context, response) pairs one document at a time."""
        count = self.es.count(index=self.index)['count']
        print(f'[!] begin of the idx: {count}')
        for i, qa in enumerate(tqdm(pairs)):
            data = {
                'context': qa[0],
                'response': qa[1]
            }
            self.es.index(index=self.index, body=data)
        print(f'[!] insert data over, whole size: {self.es.count(index=self.index)["count"]}')
class ESChat:
    """Retrieval chatbot backed by an Elasticsearch full-text index."""

    def __init__(self, index_name):
        self.es = Elasticsearch()
        self.index = index_name

    def search(self, query, samples=10):
        """Return up to *samples* hits for *query*, each with its BM25 score,
        stored context and stored response."""
        dsl = {
            'query': {
                'match': {
                    'context': query
                }
            }
        }
        hits = self.es.search(index=self.index, body=dsl, size=samples)['hits']['hits']
        rest = []
        for h in hits:
            rest.append({'score': h['_score'], 'context': h['_source']['context'],
                         'response': h['_source']['response']
                         })
        return rest

    def chat(self):
        """Interactive console loop; type 'exit' to quit."""
        sentence = input('You are speaking: ').strip()
        while sentence:
            if sentence == 'exit':
                break
            rest = self.search(sentence)
            for idx, i in enumerate(rest):
                print(f'ESChat({idx}/{len(rest)}): {i["response"]}')
            sentence = input('You are speaking: ').strip()
def read_file(path):
    """Load dialog sessions from *path*.

    Sessions are separated by blank lines; each session is a list of
    utterance lines. The result is shuffled in place via the global RNG.
    """
    with open(path) as handle:
        raw = handle.read()
    sessions = []
    for chunk in raw.split('\n\n'):
        if chunk.strip():
            sessions.append(chunk.split('\n'))
    random.shuffle(sessions)
    return sessions
def write_file(dialogs, mode='train', samples=10):
    """Write '<mode>.csv' rows of (context, response, N retrieved distractors).

    For each pair, query Elasticsearch for samples+1 candidates, drop the
    gold response if it was retrieved, and pad with random gold responses
    when too few candidates come back.
    """
    chatbot = ESChat('retrieval_chatbot')
    with open(f'{mode}.csv', 'w') as f:
        f = csv.writer(f)
        f.writerow(['Context', 'Response'] + [f'Retrieval_{i+1}' for i in range(samples)])
        # f.writerow(['Context', 'Response'])
        error_counter = 0
        responses = [i[1] for i in dialogs]
        for dialog in tqdm(dialogs):
            rest = [i['response'] for i in chatbot.search(dialog[0], samples=samples+1)]
            if dialog[1] in rest:
                rest.remove(dialog[1])
            dialog = list(dialog) + rest
            if len(dialog) != samples + 2:
                error_counter += 1
                # Pad with randomly sampled gold responses; NOTE(review) the
                # +3 over-fills by one relative to samples+2 — confirm intent.
                dialog.extend(random.sample(responses, samples+3-len(dialog)))
            # assert len(dialog) == samples + 2, f'{len(dialog)} retrieval utterances are obtained'
            f.writerow(dialog[:samples+2])
    print(f'[!] finish writing the file {mode}.csv, error counter: {error_counter}')
def process_data(dialogs, samples=10, max_len=10, max_utter_len=50):
    """Turn raw dialog sessions into (context, response) pairs.

    The context is the last *max_len* utterances before the final one,
    each truncated to its last *max_utter_len* characters and joined
    with ' <eou> '; the final utterance is the response.
    """
    pairs = []
    for session in tqdm(dialogs):
        # dialog = [' '.join(list(jieba.cut(i))) for i in dialog]
        history = session[-(max_len + 1):-1]
        reply = session[-1]
        truncated = [utterance[-max_utter_len:] for utterance in history]
        pairs.append((' <eou> '.join(truncated), reply))
    return pairs
def retrieval_model():
    """Return an ESChat bound to the 'retrieval_chatbot' index."""
    chatbot = ESChat('retrieval_chatbot')
    print(f'[!] load retrieval model from ElasticSearch, default 10 replys.')
    return chatbot
if __name__ == "__main__":
    import sys
    if sys.argv[1] == 'process':
        # Split the raw corpus 95 / 2.5 / 2.5 and write the three CSVs.
        data = read_file('train.txt')
        whole_size = len(data)
        train_size = (0, int(0.95 * whole_size))
        dev_size = (train_size[1], train_size[1] + int(0.025 * whole_size))
        test_size = (dev_size[1], whole_size)
        print(f'data size: train({train_size[1]-train_size[0]}); dev({dev_size[1]-dev_size[0]}); test({test_size[1]-test_size[0]})')
        train_data = data[train_size[0]:train_size[1]]
        dev_data = data[dev_size[0]:dev_size[1]]
        test_data = data[test_size[0]:test_size[1]]
        train_data = process_data(train_data)
        dev_data = process_data(dev_data)
        test_data = process_data(test_data)
        # write file
        write_file(train_data, mode='train')
        write_file(dev_data, mode='dev')
        write_file(test_data, mode='test')
    else:
        # Interactive mode (index population left commented out).
        # test elasticsearch
        # data = read_file('zh50w/train.txt')
        # pairs = [(' . '.join(i[:-1]), i[-1]) for i in data]
        # ut = ESUtils('retrieval_chatbot', create_index=True)
        # ut.insert_pairs(pairs)
        chatbot = ESChat('retrieval_chatbot')
        chatbot.chat()
|
[
"18811371908@163.com"
] |
18811371908@163.com
|
fe8763de336ee65092b7aaec84eea8912eb81c8c
|
df75b4d24416bb764db61931457f367872d8a66c
|
/django_states/main/migrations/0006_auto__add_field_statecapital_state__chg_field_statecapital_latitude__c.py
|
fa52b64db591b04f547b608edbe24fd3731be7db
|
[] |
no_license
|
Bofahda/states
|
bb1f7caf8409e363ba2cb67974464854f14570d8
|
11016ac07040177e81e53b1ea88739b4de0ea936
|
refs/heads/master
| 2020-12-24T16:58:56.789855
| 2015-08-12T09:20:53
| 2015-08-12T09:20:53
| 40,591,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,729
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: link StateCapital to State and relax NOT NULLs."""

    def forwards(self, orm):
        """Apply: add the one-to-one 'state' column; make latitude,
        longitude and population nullable."""
        # Adding field 'StateCapital.state'
        db.add_column(u'main_statecapital', 'state',
                      self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.State'], unique=True, null=True),
                      keep_default=False)

        # Changing field 'StateCapital.latitude'
        db.alter_column(u'main_statecapital', 'latitude', self.gf('django.db.models.fields.FloatField')(null=True))

        # Changing field 'StateCapital.longitude'
        db.alter_column(u'main_statecapital', 'longitude', self.gf('django.db.models.fields.FloatField')(null=True))

        # Changing field 'StateCapital.population'
        db.alter_column(u'main_statecapital', 'population', self.gf('django.db.models.fields.IntegerField')(null=True))

    def backwards(self, orm):
        """Revert: drop the 'state' column and restore NOT NULL with
        default=1 on the three numeric fields."""
        # Deleting field 'StateCapital.state'
        db.delete_column(u'main_statecapital', 'state_id')

        # Changing field 'StateCapital.latitude'
        db.alter_column(u'main_statecapital', 'latitude', self.gf('django.db.models.fields.FloatField')(default=1))

        # Changing field 'StateCapital.longitude'
        db.alter_column(u'main_statecapital', 'longitude', self.gf('django.db.models.fields.FloatField')(default=1))

        # Changing field 'StateCapital.population'
        db.alter_column(u'main_statecapital', 'population', self.gf('django.db.models.fields.IntegerField')(default=1))

    # Frozen ORM snapshot used by South at migration time.
    models = {
        u'main.state': {
            'Meta': {'object_name': 'State'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        u'main.statecapital': {
            'Meta': {'object_name': 'StateCapital'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'state': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['main.State']", 'unique': 'True', 'null': 'True'})
        }
    }

    complete_apps = ['main']
|
[
"user@Users-MacBook-Air.local"
] |
user@Users-MacBook-Air.local
|
3748a9e8a475776c784dacc5951e89171f92e72b
|
cb2a4180ffc0df4296737134230397069de8da21
|
/accounts/signals.py
|
d6f2b8bc1882f084fe375b450158fdf3249fc531
|
[
"MIT"
] |
permissive
|
fagrimacs/fagrimacs_production
|
8a9cef4e1d73360301fd66f4f0b70ea4868ef610
|
ea1a8f92c41c416309cc1fdd8deb02f41a9c95a0
|
refs/heads/master
| 2022-12-23T22:08:27.768479
| 2020-09-24T10:10:35
| 2020-09-24T10:10:35
| 295,315,768
| 0
| 0
|
MIT
| 2020-09-24T10:10:36
| 2020-09-14T05:44:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 646
|
py
|
from django.db.models.signals import pre_save
from django.dispatch import receiver
from .models import UserProfile
@receiver(pre_save, sender=UserProfile)
def delete_prev_profile_pic(sender, instance, **kwargs):
    """Before saving a UserProfile, delete the previous picture file when it
    is being replaced — except the shared default placeholder."""
    if instance.pk:  # only on updates; a first save has nothing to clean up
        try:
            prev_profile = UserProfile.objects.get(
                pk=instance.pk).profile_pic
        except UserProfile.DoesNotExist:
            return
        else:
            new_profile = instance.profile_pic
            # NOTE(review): comparing .url assumes both fields hold a file;
            # a cleared new picture would raise here — confirm upstream.
            if prev_profile and prev_profile.url != new_profile.url:
                if prev_profile != 'profile_pics/user.png':
                    # save=False: the model row is about to be saved anyway.
                    prev_profile.delete(save=False)
|
[
"zendainnocent@gmail.com"
] |
zendainnocent@gmail.com
|
2265604085f0b363acfc4bbfcfd9c1294885eb23
|
626b14ce13986b6d5e03143e151004247659625a
|
/Day01-15/code/Day07/dict2.py
|
1f89c849d510ca7c3702747ec28763684b8c1a4f
|
[] |
no_license
|
Focavn/Python-100-Days
|
c7586ecf7ae3f1fd42f024558bb998be23ee9df8
|
d8de6307aeff9fe31fd752bd7725b9cc3fbc084b
|
refs/heads/master
| 2021-08-08T17:57:02.025178
| 2020-09-17T11:58:04
| 2020-09-17T11:58:04
| 220,427,144
| 0
| 0
| null | 2019-11-08T08:59:43
| 2019-11-08T08:59:41
| null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
"""
字典的常用操作
Version: 0.1
Author: 骆昊
Date: 2018-03-06
"""
def main():
stu = {'name': '骆昊', 'age': 38, 'gender': True}
print(stu)
print(stu.keys())
print(stu.values())
print(stu.items())
for elem in stu.items():
print(elem)
print(elem[0], elem[1])
if 'age' in stu:
stu['age'] = 20
print(stu)
stu.setdefault('score', 60)
print(stu)
stu.setdefault('score', 100)
print(stu)
stu['score'] = 100
print(stu)
if __name__ == '__main__':
main()
|
[
"Focavn@users.github.com"
] |
Focavn@users.github.com
|
216d0e5c1001e89b218aef24c8cabfa7ee8027a8
|
5a310398592ddb75d27dc67c9b45198e31cb0d55
|
/rfid-v1.py
|
d941851aa03ed4b0f8dbaef378689460a5bf2f2a
|
[] |
no_license
|
ch-tseng/rfidDoor2
|
97871be9d431515425180b8e0893400a9b147831
|
ed04b794d6c70dc223bb2f75e5d7367bea8353b4
|
refs/heads/master
| 2021-01-20T04:21:23.102422
| 2017-05-04T05:03:02
| 2017-05-04T05:03:02
| 89,674,676
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,950
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import urllib.request
import logging
import json
#import base64
import binascii
import sys
import time
# A UDP server
# Set up a UDP server
UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# Listen on port 21567
# (to all IP addresses on this system)
# NOTE(review): the comment above says 21567 but the code binds 8080.
listen_addr = ("",8080)
UDPSock.bind(listen_addr)
debugPrint = False
# Web API that resolves an RFID tag to door info (access token in the URL).
urlHeadString = "http://data.sunplusit.com/Api/DoorRFIDInfo?code=83E4621643F7B2E148257244000655E3&rfid="
#-----------------------------------------
# logging setup
logger = logging.getLogger('msg')
hdlr = logging.FileHandler('/home/chtseng/rfidDoor/msg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def is_json(myjson):
    """Return True if *myjson* parses as JSON, else False.

    Also treats non-string input (e.g. None, when the HTTP call failed in an
    unexpected way) as "not JSON" instead of letting json.loads raise
    TypeError and crash the server loop.
    """
    try:
        json.loads(myjson)
    except (ValueError, TypeError):
        return False
    return True
while True:
    # Block until a reader pushes a tag over UDP, then forward it to the API.
    data,addr = UDPSock.recvfrom(1024)
    #tmpTAGS, tmpTIMES = scanTAGS(binascii.b2a_hex(data).decode('ascii'))
    readHEX = binascii.b2a_hex(data).decode('ascii')  # raw tag bytes as hex
    logger.info('Received rfid:' + readHEX)
    if(debugPrint==True):
        print (readHEX)
    try:
        # Ask the web service who/what this tag belongs to.
        webReply = urllib.request.urlopen(urlHeadString + readHEX).read()
        webReply = webReply.decode('utf-8').rstrip()
        logger.info('webReply: {}'.format(webReply))
        if(debugPrint==True):
            print('webReply: {}'.format(webReply))
            print(urlHeadString + readHEX)
            print("webReply:" + webReply)
        # listTAGs = webReply.split("")
    except Exception:
        # Network/HTTP failure: log it and fall back to an empty JSON list
        # so the loop keeps serving.
        print("Unexpected error:", sys.exc_info()[0])
        logger.info('Unexpected error:' + str(sys.exc_info()[0]))
        webReply = "[]"
        pass
    if(is_json(webReply)==True):
        jsonReply = json.loads(webReply)
        if(debugPrint==True):
            print (jsonReply)
    #time.sleep(1)
|
[
"ch.tseng@sunplusit.com"
] |
ch.tseng@sunplusit.com
|
64541b443d026560b213cf649fddf14d9174859e
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/894c37be40744bf289920a1bd2eb6ba4.py
|
8a84f39bc980357d36a643c97a4bffbd58c75679
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
def hey(phrase):
    """Bob's reply: silence gets 'Fine. Be that way!', shouting gets
    'Whoa, chill out!', a question gets 'Sure.', anything else 'Whatever.'."""
    message = phrase.strip()
    if not message:
        return 'Fine. Be that way!'
    if message.isupper():
        return 'Whoa, chill out!'
    if message.endswith('?'):
        return 'Sure.'
    return 'Whatever.'
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
62c90294b18a2c3fd268af603a53b8a22b86605c
|
30754a148b79903d6e49399f1f270c79934ce389
|
/fuzzinator/ui/tui/tui_listener.py
|
9427825e28a0e23a7c0a9b1feb0cd1a50d817e82
|
[
"BSD-3-Clause"
] |
permissive
|
syedkhalid/fuzzinator
|
720ffc552c595b50de46e4e4e51f3a01cdc9aa77
|
f90b58605de563e77b85ed0d54d2beb29efc7d14
|
refs/heads/master
| 2021-04-09T17:31:06.625840
| 2018-03-12T14:37:18
| 2018-03-12T15:21:27
| 125,814,277
| 1
| 0
|
BSD-3-Clause
| 2018-03-19T06:53:29
| 2018-03-19T06:53:29
| null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
# Copyright (c) 2016-2017 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import inspect
import os
from fuzzinator.listener import EventListener
class TuiListener(EventListener):
    """EventListener proxy that forwards every listener callback to the TUI
    process through a shared event queue plus a wake-up pipe."""

    def __init__(self, pipe, events, lock):
        # Replace every EventListener method with a Trampoline that ships the
        # call through the queue/pipe instead of executing it here.
        for fn, _ in inspect.getmembers(EventListener, predicate=inspect.isfunction):
            setattr(self, fn, self.Trampoline(name=fn, pipe=pipe, events=events, lock=lock))

    class Trampoline(object):
        """Callable standing in for one listener method: enqueues the call
        descriptor and pokes the pipe so the TUI wakes up."""

        def __init__(self, name, pipe, events, lock):
            self.name = name
            self.pipe = pipe
            self.events = events
            self.lock = lock

        def __call__(self, **kwargs):
            with self.lock:
                try:
                    self.events.put_nowait({'fn': self.name, 'kwargs': kwargs})
                    os.write(self.pipe, b'x')
                except Exception:
                    # Best-effort delivery: drop the event if the queue is full
                    # or the pipe is gone. Narrowed from a bare `except:` so
                    # SystemExit/KeyboardInterrupt are no longer swallowed.
                    pass
|
[
"reni@inf.u-szeged.hu"
] |
reni@inf.u-szeged.hu
|
06d1a676a79e4717ef3a8f9091ba8612972c4f88
|
af829a7bb04f515b01dc78aaeb318991ead50d24
|
/cart/forms.py
|
05c7b1e1972cd2dd46c070cb532696742bea4a12
|
[] |
no_license
|
marcinpelszyk/Djnago-ecom
|
75ffffb1d6fcd5457d9db8bf166610b15994203f
|
8ae049087c952b52f287dd58f6a91a2e83113921
|
refs/heads/main
| 2023-06-08T20:14:22.007418
| 2021-06-14T20:56:26
| 2021-06-14T20:56:26
| 376,601,973
| 0
| 0
| null | 2021-06-14T20:56:27
| 2021-06-13T17:26:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 192
|
py
|
from django import forms
from django import forms
from .models import OrderItem
class AddCartForm(forms.ModelForm):
    """Form for adding an OrderItem to the cart; only quantity is editable."""

    class Meta:
        model = OrderItem
        fields = ['quantity']
|
[
"marcin.pelszyk90@gmail.com"
] |
marcin.pelszyk90@gmail.com
|
d264e28ed2341e4155dedc2bdc2156861c78747e
|
e7ce273f404f82fd8672c97e50b386509c8f9870
|
/Advanced/File_Handling/Directory_Traversal.py
|
5aa0fbcbb285a6c22e60ca7405b8dd188b7a9b8b
|
[] |
no_license
|
rzlatkov/Softuni
|
3edca300f8ecdcfd86e332557712e17552bc91c3
|
a494e35bff965b2b9dccc90e1381d5a1a23737a1
|
refs/heads/main
| 2023-07-02T12:49:59.737043
| 2021-08-13T20:47:07
| 2021-08-13T20:47:07
| 319,088,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
# Directory Traversal
import os
# Login name of the current user; used to build the report paths below.
USER = os.getlogin()
USED_PATH = '/home/rzlatkov/Softuni/Advanced/File_Handling/' # first level traverse only
ROOT_PATH = './' # traverse everything
REPORT_PATH_WINDOWS = f'C:\\Users\\{USER}\\Desktop\\report.txt' # for Win users
REPORT_PATH_LINUX = f'/home/{USER}/Desktop/report.txt' # I am coding on a Linux (Manjaro)
def traverse(path):
    """Recursively walk *path* and group file names by extension.

    The "extension" is everything from the first dot onwards ('.tar.gz' for
    'a.tar.gz'), matching the original grouping rule. Files without a dot are
    now skipped instead of crashing with ValueError as before.

    Returns a dict mapping extension -> list of file names (walk order).
    """
    dictionary_of_files = {}
    for _, _, files in os.walk(path):
        for f in files:
            if '.' not in f:
                continue  # no extension -> nothing to group by
            extension = f[f.index('.'):]
            dictionary_of_files.setdefault(extension, []).append(f)
    return dictionary_of_files
def sort_extensions(dictionary_of_files):
    """Return a new dict with the extension keys in ascending order."""
    ordered_items = sorted(dictionary_of_files.items())
    return dict(ordered_items)
def sort_filenames(dictionary_list_values):
    """Return the file names sorted alphabetically (new list)."""
    return sorted(dictionary_list_values)
def write_to_report(result, report_path):
    """Write one section per extension: the extension on its own line, then
    each file name (alphabetical) prefixed with '- - - '."""
    with open(report_path, 'w') as writer:
        for ext, fnames in result.items():
            writer.write(ext + '\n')
            # Inlined the sort helper: plain sorted() is equivalent here.
            for fname in sorted(fnames):
                writer.write('- - - {}\n'.format(fname))
# Script entry: scan the configured directory and write the grouped report.
files = traverse(USED_PATH)
sorted_ext = sort_extensions(files)
write_to_report(sorted_ext, REPORT_PATH_LINUX)
|
[
"nozzller@gmail.com"
] |
nozzller@gmail.com
|
704361a75a15b4ff7147ee4334cde26f9da2f4dd
|
180beda50f31031bd3ba1668067bdb73fc1a7686
|
/website/members/management/commands/generate_member_invoices.py
|
e73095aaebe1d829fb36e519f1585796e73b12cd
|
[
"MIT"
] |
permissive
|
eamanu/asoc_members
|
053a05563a20ff4cafd09020367f3d60f149392e
|
bf2e99e9c63c60a59bdfd10ca1812d78851cbde6
|
refs/heads/master
| 2020-11-26T14:20:46.857545
| 2020-01-06T12:57:30
| 2020-01-06T12:57:30
| 229,101,268
| 0
| 0
|
MIT
| 2019-12-19T17:01:15
| 2019-12-19T17:01:14
| null |
UTF-8
|
Python
| false
| false
| 7,788
|
py
|
import datetime
import os
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand
from django.db.models import Max
from members import logic
from members.models import Quota, Person, Payment, Member, PaymentStrategy
from . import _afip, _gdrive
INVOICES_FROM = '2018-08-01 00:00+03'
GMTminus3 = datetime.timezone(datetime.timedelta(hours=-3))
# mail stuff
MAIL_SUBJECT = "Factura por pago de cuota(s) a la Asociación Civil Python Argentina"
MAIL_TEXT = """\
Hola!
Adjunta va la factura por el pago hecho en fecha {payment_date:%Y-%m-%d}.
¡Gracias! Saludos,
--
. Lalita
.
Asociación Civil Python Argentina
http://ac.python.org.ar/
(claro, este mail es automático, soy une bot, pero contestá el mail sin problemas que
le va a llegar al humane correspondiente)
"""
PDF_MIMETYPE = 'application/pdf'
def _send_mail(payment_date, recipient, attach_path):
    """Email the invoice PDF at *attach_path* to *recipient*.

    The mail body mentions *payment_date*; the attachment keeps its
    original file name.
    """
    text = MAIL_TEXT.format(payment_date=payment_date)
    mail = EmailMessage(MAIL_SUBJECT, text, settings.EMAIL_FROM, [recipient])
    filename = os.path.basename(attach_path)
    with open(attach_path, "rb") as fh:
        attach_content = fh.read()
    mail.attach(filename, attach_content, PDF_MIMETYPE)
    mail.send()
class Command(BaseCommand):
    """Management command: authorize AFIP invoices for pending payments,
    upload them to Google Drive and mail them to the paying member."""

    help = "Generate the missing invoices"

    def add_arguments(self, parser):
        # --limit caps how many payments are processed per run (default 1).
        parser.add_argument('--limit', type=int, nargs='?', default=1)
        parser.add_argument(
            '--invoice-date', type=str, nargs='?', help="Invoice date (%Y-%m-%d), forces limit=1")

    def handle(self, *args, **options):
        """Find invoiceless payments, build AFIP records, generate the
        invoices and post-process (upload + mail) the successful ones."""
        limit = options['limit']
        invoice_date = options['invoice_date']
        if invoice_date is None:
            invoice_date = datetime.date.today()
        else:
            # An explicit date is meant for reprocessing a single payment.
            invoice_date = datetime.datetime.strptime(invoice_date, "%Y-%m-%d").date()
            limit = 1
            print("Forcing invoice date to {} (also limit=1)".format(invoice_date))

        records = []

        # check AFIP
        _afip.verify_service()

        # get the greatest invoice number used (once, will keep updated later)
        _max_invoice_number_query = Payment.objects.aggregate(Max('invoice_number'))
        max_invoice_number = _max_invoice_number_query['invoice_number__max']
        print("Found max invoice number {}".format(max_invoice_number))

        # get payments after we started automatically that still have no invoice generated
        payments_per_invoice = {}
        persons_per_invoice = {}
        payments = (
            Payment.objects.filter(timestamp__gte=INVOICES_FROM, invoice_ok=False)
            .exclude(strategy__platform=PaymentStrategy.CREDIT)
            .order_by('timestamp', 'pk').all()
        )
        print("Found {} payments to process".format(len(payments)))
        if len(payments) > limit:
            payments = payments[:limit]
            print(" truncating to {}".format(limit))

        for payment in payments:
            print("Generating invoice for payment", payment)
            record = {
                'invoice_date': invoice_date,
            }
            records.append(record)

            # get the related member (if None, or multiple, still not supported!)
            _members = Member.objects.filter(patron=payment.strategy.patron).all()
            assert len(_members) == 1, "multiple or no members for the patron is not supported"
            member = _members[0]

            # only process payments for normal members (benefactor members get invoices done
            # by hand)
            person = member.entity
            if isinstance(person, Person):
                print(" person found", person)
            else:
                print(" IGNORING payment, member {} is not a person: {}".format(member, person))
                continue

            # if payment still doesn't have a number, add one to latest and save;
            # in any case, use it
            if not payment.invoice_number:
                max_invoice_number += 1
                payment.invoice_number = max_invoice_number
                payment.invoice_spoint = settings.AFIP['selling_point']
                payment.save()
                print(" using new invoice number", payment.invoice_number)
            else:
                print(" using already stored invoice number", payment.invoice_number)
                assert payment.invoice_spoint == settings.AFIP['selling_point']
            payments_per_invoice[payment.invoice_number] = payment
            record['invoice'] = payment.invoice_number

            # we bill one item, for the whole amount: "3 quotas for $300", instead of billing
            # 3 x "1 quota for $100", which would be problematic if the paid amount is
            # not exactly 300
            record['amount'] = payment.amount
            record['quantity'] = 1

            # get all billing data from the person
            persons_per_invoice[payment.invoice_number] = person
            record['dni'] = person.document_number
            record['fullname'] = person.full_name
            record['address'] = person.street_address
            record['city'] = person.city
            record['zip_code'] = person.zip_code
            record['province'] = person.province

            # render the payment timestamp in Argentina local time
            tstamp_argentina = payment.timestamp.astimezone(GMTminus3)
            record['payment_comment'] = "Pago via {} ({:%Y-%m-%d %H:%M})".format(
                payment.strategy.platform_name, tstamp_argentina)

            # get quotas for the payment; we don't show the period in the description
            # as there's a specific field for that
            quotas = list(Quota.objects.filter(payment=payment).order_by('year', 'month').all())
            assert quotas
            if len(quotas) == 1:
                description = "1 cuota social"
            else:
                description = "{} cuotas sociales".format(len(quotas))
            record['description'] = description

            # billed service period: first day of the first quota's month up to
            # the last day of the last quota's month
            from_quota = quotas[0]
            from_day = datetime.date(from_quota.year, from_quota.month, 1)
            to_quota = quotas[-1]
            ny, nm = logic.increment_year_month(to_quota.year, to_quota.month)
            to_day = datetime.date(ny, nm, 1) - datetime.timedelta(days=1)
            record['service_date_from'] = from_day.strftime("%Y%m%d")
            record['service_date_to'] = to_day.strftime("%Y%m%d")
            print(" found {} quota(s) ({} - {})".format(
                len(quotas), record['service_date_from'], record['service_date_to']))

        try:
            results = _afip.generate_invoices(records)
        except Exception:
            print("PROBLEMS generating invoices with records", records)
            raise

        # save the results for the generated ok invoices and send the proper mails
        for invoice_number, result in sorted(results.items()):
            print("Post-processing invoice {} at {}".format(
                invoice_number, result.get('pdf_path')))
            if not result['invoice_ok']:
                print(" WARNING: invoice NOT authorized ok")
                continue
            payment = payments_per_invoice[invoice_number]
            payment.invoice_ok = True
            payment.save()

            # upload the invoice to google drive
            _gdrive.upload_invoice(result['pdf_path'], invoice_date)
            print(" uploaded to gdrive OK")

            # send the invoice by mail
            person = persons_per_invoice[invoice_number]
            _send_mail(payment.timestamp, person.email, result['pdf_path'])
            print(" sent by mail OK")

            # invoice uploaded to gdrive and sent ok, don't need it here anymore
            os.remove(result['pdf_path'])
|
[
"facundo@taniquetil.com.ar"
] |
facundo@taniquetil.com.ar
|
3cf43fa8092e181dca265178db23f042cb43c200
|
8e304f1291480db18b9727efa61647b369531172
|
/csvkit/convert/js.py
|
8757c65a2bfbc280bab8fc78a153f0f7dcf57e4c
|
[
"MIT"
] |
permissive
|
zviri/csvkit
|
4439fff0e376d089f89420fabca245c25eb12dc5
|
39f5d3b6c7d6eaaf145e7e01fa247292763da16d
|
refs/heads/master
| 2021-01-12T20:54:42.673449
| 2014-09-04T14:11:06
| 2014-09-04T14:11:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,879
|
py
|
#!/usr/bin/env python
import json
import six
from csvkit import CSVKitWriter
def parse_object(obj, path=''):
    """
    Recursively parse JSON objects and a dictionary of paths/keys and values.

    Inspired by JSONPipe (https://github.com/dvxhouse/jsonpipe).

    Nested dicts/lists are flattened into {'a/b/0': value} style keys;
    scalars map their accumulated path to the value.
    """
    if isinstance(obj, dict):
        # BUGFIX: dict.iteritems() does not exist on Python 3 and crashed
        # there; six.iteritems works on both (the file already uses six).
        iterator = six.iteritems(obj)
    elif isinstance(obj, (list, tuple)):
        iterator = enumerate(obj)
    else:
        return { path.strip('/'): obj }

    d = {}

    for key, value in iterator:
        key = six.text_type(key)
        d.update(parse_object(value, path + key + '/'))

    return d
def json2csv(f, key=None, **kwargs):
    """
    Convert a JSON document into CSV format.

    The top-level element of the input must be a list or a dictionary. If it is a dictionary, a key must be provided which is an item of the dictionary which contains a list.
    """
    document = f.read()
    js = json.loads(document)

    if isinstance(js, dict):
        if not key:
            raise TypeError('When converting a JSON document with a top-level dictionary element, a key must be specified.')
        js = js[key]

    if not isinstance(js, list):
        raise TypeError('Only JSON documents with a top-level list element are able to be converted (or a top-level dictionary if specifying a key).')

    # Flatten every object, then take the union of all flattened keys as
    # the CSV header (sorted for a stable column order).
    field_set = set()
    flat = []

    for obj in js:
        flat.append(parse_object(obj))

    for obj in flat:
        field_set.update(obj.keys())

    fields = sorted(list(field_set))

    o = six.StringIO()
    writer = CSVKitWriter(o)

    writer.writerow(fields)

    # Rows are filled column by column; missing fields become None (empty cell).
    for i in flat:
        row = []

        for field in fields:
            if field in i:
                row.append(i[field])
            else:
                row.append(None)

        writer.writerow(row)

    output = o.getvalue()
    o.close()

    return output
|
[
"staringmonkey@gmail.com"
] |
staringmonkey@gmail.com
|
6389cd069b984d4e989a8c114236bd598cef97a2
|
a89dfda3732eb73863b3e2fb1ebb46f1cb40973a
|
/txweb/tests/test_util_basic_sanitize_render_output.py
|
3ea1634b5077da8b2f699e5319c384c2d49cc0f1
|
[
"MIT"
] |
permissive
|
devdave/txWeb
|
543ccb7be0671a5e83959bb7cfc8e7804f04a74a
|
e447fbefd16134cb2f83323c04c20c41638d7da3
|
refs/heads/master
| 2022-12-15T18:11:50.880675
| 2021-03-24T18:48:16
| 2021-03-24T18:48:16
| 2,116,693
| 1
| 0
|
MIT
| 2022-12-08T04:28:41
| 2011-07-28T03:55:43
|
Python
|
UTF-8
|
Python
| false
| false
| 568
|
py
|
import pytest
from txweb.util.basic import sanitize_render_output
from twisted.web.server import NOT_DONE_YET
from twisted.internet.defer import Deferred
def test_full_suite_coverage():
    """Exercise every branch of sanitize_render_output in one pass."""
    assert sanitize_render_output("Foo") == b"Foo"  # str is encoded to bytes
    assert sanitize_render_output(b"Foo") == b"Foo"  # bytes pass through

    with pytest.raises(RuntimeError):
        # tuples are rejected outright
        assert sanitize_render_output(("Foo",))

    assert sanitize_render_output(NOT_DONE_YET) == NOT_DONE_YET

    # A Deferred signals async completion, mapped to NOT_DONE_YET
    d = Deferred()
    assert sanitize_render_output(d) == NOT_DONE_YET

    assert sanitize_render_output(123) == b"123"  # other objects stringified
|
[
"devdave@ominian.net"
] |
devdave@ominian.net
|
c7a84219541a207b77a6abe222131259e8320dcf
|
18a79067223932c2f7aa6ff6b81d0b3f36169db2
|
/atcoder/abc178/A.py
|
fbe81ed981719c0a616b1b4817d553d4699e8bb1
|
[] |
no_license
|
aadiupadhyay/CodeForces
|
894b0e5faef73bfd55a28c2058fb0ca6f43c69f9
|
76dac4aa29a2ea50a89b3492387febf6515cf43e
|
refs/heads/master
| 2023-04-12T17:58:52.733861
| 2021-05-07T20:08:00
| 2021-05-11T20:07:11
| 330,149,645
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
from sys import stdin,stdout
# One-shot competitive-programming input helpers over sys.stdin.
st=lambda:list(stdin.readline().strip())              # line as list of chars
li=lambda:list(map(int,stdin.readline().split()))     # line as list of ints
mp=lambda:map(int,stdin.readline().split())           # line as int iterator
inp=lambda:int(stdin.readline())                      # line as a single int
pr=lambda n: stdout.write(str(n)+"\n")                # print with newline
mod=1000000007
def solve():
    # ABC178 A: print the logical NOT of n (1 for even n, 0 for odd).
    n=inp()
    print((n+1)%2)
for _ in range(1):
    solve()
|
[
"upadhyay.aaditya2001@gmail.com"
] |
upadhyay.aaditya2001@gmail.com
|
3f912421cf12848e776f7f30387961e82a570848
|
ef32b87973a8dc08ba46bf03c5601548675de649
|
/pytglib/api/functions/get_web_app_url.py
|
fd431d0848611e166908d7d79bd1b425fdebbbee
|
[
"MIT"
] |
permissive
|
iTeam-co/pytglib
|
1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721
|
d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5
|
refs/heads/master
| 2022-07-26T09:17:08.622398
| 2022-07-14T11:24:22
| 2022-07-14T11:24:22
| 178,060,880
| 10
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
from ..utils import Object
class GetWebAppUrl(Object):
    """
    Returns an HTTPS URL of a Web App to open after keyboardButtonTypeWebApp button is pressed

    Attributes:
        ID (:obj:`str`): ``GetWebAppUrl``

    Args:
        bot_user_id (:obj:`int`):
            Identifier of the target bot

        url (:obj:`str`):
            The URL from the keyboardButtonTypeWebApp button

        theme (:class:`telegram.api.types.themeParameters`):
            Preferred Web App theme; pass null to use the default theme

    Returns:
        HttpUrl

    Raises:
        :class:`telegram.Error`
    """
    # TDLib method name used on the wire.
    ID = "getWebAppUrl"

    def __init__(self, bot_user_id, url, theme, extra=None, **kwargs):
        self.extra = extra  # opaque payload echoed back by the server
        self.bot_user_id = bot_user_id  # int
        self.url = url  # str
        self.theme = theme  # ThemeParameters

    @staticmethod
    def read(q: dict, *args) -> "GetWebAppUrl":
        # Deserialize from a TDLib response dict; 'theme' is a nested object.
        bot_user_id = q.get('bot_user_id')
        url = q.get('url')
        theme = Object.read(q.get('theme'))
        return GetWebAppUrl(bot_user_id, url, theme)
|
[
"arshshia@gmail.com"
] |
arshshia@gmail.com
|
ec8e7dafe20595ebc94fed5089fa5fc70c148552
|
6147d3d059a048be57aaabe3519551ed4bc305ec
|
/config/management/commands/fs2import.py
|
ed4cb19b50f25028e6c1954d36d1fe16bcc534f1
|
[
"MIT"
] |
permissive
|
a-mere-peasant/MangAdventure
|
a8f7fdfddf5ae65e645b0e0e0d197f2b0033bc8d
|
afbcdb5ab68bfc801550c8383568f7265e70b5ab
|
refs/heads/master
| 2020-08-06T13:38:59.062119
| 2019-10-05T12:22:53
| 2019-10-05T12:22:53
| 212,808,131
| 0
| 0
|
MIT
| 2019-10-04T13:07:47
| 2019-10-04T12:18:27
| null |
UTF-8
|
Python
| false
| false
| 4,613
|
py
|
from os.path import abspath, join
from xml.etree import cElementTree as et
from django.core.files import File
from django.core.management import BaseCommand
from groups.models import Group
from reader.models import Chapter, Page, Series
def _get_element(tables, name):
return list(filter(
lambda t: t.attrib['name'].endswith(name), tables
))
def _get_column(table, name):
text = table.find('column[@name="%s"]' % name).text
return text if text is not None else ''
def _sort_children(tables, name):
return sorted(tables, key=lambda p: _get_column(p, name))
class Command(BaseCommand):
    """Management command that imports a FoolSlide2 installation.

    Reads FS2's exported XML dump plus its on-disk content tree and
    bulk-creates the corresponding Group / Series / Chapter / Page rows.
    """
    help = 'Imports data from FoolSlide2.'

    def add_arguments(self, parser):
        parser.add_argument(
            'root', type=str,
            help='The path to the root directory of the FS2 installation.'
        )
        parser.add_argument(
            'data', type=str,
            help="The path to FS2's exported data (in XML format)."
        )

    def handle(self, *args, **options):
        root = abspath(options['root'])
        data = abspath(options['data'])
        tables = et.parse(data).findall('database/table')
        content = join(root, 'content', 'comics')
        # id -> on-disk directory pairs, filled as we go so chapters can
        # resolve their parent series directory (and pages their chapter's).
        directories = {'series': [], 'chapters': []}
        elements = {
            'series': _get_element(tables, 'comics'),
            'chapters': _get_element(tables, 'chapters'),
            'pages': _get_element(tables, 'pages'),
            'groups': _get_element(tables, 'teams')
        }
        all_groups = []
        for g in elements['groups']:
            group = Group(
                id=_get_column(g, 'id'),
                name=_get_column(g, 'name'),
                website=_get_column(g, 'url'),
                twitter=_get_column(g, 'twitter'),
                irc=_get_column(g, 'irc')
            )
            all_groups.append(group)
        Group.objects.bulk_create(all_groups)
        all_series = []
        for s in elements['series']:
            slug = _get_column(s, 'stub')
            series = Series(
                id=_get_column(s, 'id'), slug=slug,
                title=_get_column(s, 'name'),
                description=_get_column(s, 'description'),
            )
            thumb = _get_column(s, 'thumbnail')
            # FS2 stores each series under "<stub>_<uniqid>".
            series_dir = join(content, '%s_%s' % (
                slug, _get_column(s, 'uniqid')
            ))
            cover = join(series_dir, 'thumb_%s' % thumb)
            with open(cover, 'rb') as f:
                series.cover.save(thumb, File(f), save=False)
            all_series.append(series)
            directories['series'].append(
                (_get_column(s, 'id'), series_dir)
            )
        Series.objects.bulk_create(all_series)
        all_chapters = []
        chapter_groups = []
        groups_through = Chapter.groups.through
        for c in elements['chapters']:
            cid = _get_column(c, 'id')
            sid = _get_column(c, 'comic_id')
            # Chapter and subchapter combine into one decimal, e.g. 12.5.
            number = float('%s.%s' % (
                _get_column(c, 'chapter') or '0',
                _get_column(c, 'subchapter') or '0'
            ))
            volume = int(_get_column(c, 'volume') or '0')
            chapter = Chapter(
                id=cid, series_id=sid,
                title=_get_column(c, 'name'),
                volume=volume, number=number
            )
            gid = _get_column(c, 'team_id')
            if gid:
                # M2M rows are created manually via the through model so
                # everything can be bulk-inserted at the end.
                chapter_groups.append(
                    groups_through(chapter_id=cid, group_id=gid)
                )
            _dir = next(d[1] for d in directories['series'] if d[0] == sid)
            directories['chapters'].append((
                cid, join(_dir, '%s_%s' % (
                    _get_column(c, 'stub'), _get_column(c, 'uniqid')
                ))
            ))
            all_chapters.append(chapter)
        Chapter.objects.bulk_create(all_chapters)
        groups_through.objects.bulk_create(chapter_groups)
        all_pages = []
        page_numbers = {}
        # Pages are numbered per chapter, in filename order.
        for p in _sort_children(elements['pages'], 'filename'):
            pid = _get_column(p, 'id')
            cid = _get_column(p, 'chapter_id')
            page_numbers[cid] = page_numbers.get(cid, 0) + 1
            page = Page(id=pid, chapter_id=cid, number=page_numbers[cid])
            _dir = next(d[1] for d in directories['chapters'] if d[0] == cid)
            fname = _get_column(p, 'filename')
            with open(join(_dir, fname), 'rb') as f:
                page.image.save(fname, File(f), save=False)
            all_pages.append(page)
        Page.objects.bulk_create(all_pages)
|
[
"chronobserver@disroot.org"
] |
chronobserver@disroot.org
|
7bdb3032d0b87e6e58936035f17049cb25437466
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/train/python/4fde7f3e48576985304dbb54c7ab85f5d1c4d4e9observer.py
|
4fde7f3e48576985304dbb54c7ab85f5d1c4d4e9
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994
| 2018-11-29T23:21:23
| 2018-11-29T23:21:23
| 158,597,498
| 0
| 0
|
MIT
| 2018-11-21T19:36:42
| 2018-11-21T19:36:41
| null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
from flask import Flask
from flask.ext.restful import reqparse, abort, Api, Resource
from utils import get_controllers
app = Flask(__name__)
api = Api(app)
def get_controller_by_id(controller_id):
    """Return the controller at index `controller_id`, or abort with HTTP 404.

    NOTE(review): only IndexError is caught, which assumes `controllers` is a
    sequence indexed by int (matching the '<int:controller_id>' route); if
    get_controllers() ever returned a mapping this would raise KeyError —
    confirm against utils.get_controllers.
    """
    try:
        return controllers[controller_id]
    except IndexError:
        abort(404, message="Controller {} doesn't exist".format(controller_id))
class ControllerListResource(Resource):
    """REST collection endpoint: serialize every known controller."""

    def get(self):
        states = []
        for ctrl in controllers:
            states.append(ctrl.state_as_dict())
        return states
class ControllerResource(Resource):
    """REST item endpoint: serialize a single controller looked up by id."""

    def get(self, controller_id):
        return get_controller_by_id(controller_id).state_as_dict()
# Route registration: collection at /controllers, item by integer id.
api.add_resource(ControllerListResource, '/controllers')
api.add_resource(ControllerResource, '/controllers/<int:controller_id>')

if __name__ == '__main__':
    # NOTE(review): `controllers` is only bound under this guard, so importing
    # the module and serving the routes elsewhere would raise NameError —
    # confirm this script is only ever run directly.
    controllers = get_controllers(read_only=True)
    app.run(debug=True, use_reloader=False)
    # Runs after the dev server exits (e.g. Ctrl+C): release each controller.
    for controller in controllers:
        controller.terminate()
|
[
"aliostad+github@gmail.com"
] |
aliostad+github@gmail.com
|
a93ea63288eff967bb4d9195f3c82744bd638f54
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_fasting.py
|
2a2b2ee81235039958aac103ee2d9541cc58f881
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from xai.brain.wordbase.verbs._fast import _FAST
#calss header
class _FASTING(_FAST):
    """Lexicon entry for the verb form "FASTING" (base form "fast")."""

    def __init__(self):
        _FAST.__init__(self)
        # Word-form metadata consumed by the lexicon machinery.
        self.basic = "fast"
        self.name = "FASTING"
        self.specie = 'verbs'
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
90a9f66b973d50155b27452cee64bbbfb1800a9b
|
4dd1d8fa59e20061e2c12e540fc52b1b305e575b
|
/source/sims-2/boltz-bgk/s5/plot-sol.py
|
ff8f54a4239c7054fea5d9ce39a198fdbe5120d1
|
[
"MIT"
] |
permissive
|
ammarhakim/ammar-simjournal
|
f63521906a97d55ab290a5960d94758139944c89
|
5019f4723e20db80a20db6f2bd454c2fd3241412
|
refs/heads/master
| 2023-06-08T08:18:11.722779
| 2023-06-02T15:06:43
| 2023-06-02T15:06:43
| 204,050,516
| 3
| 3
| null | 2022-02-01T16:53:13
| 2019-08-23T18:28:44
|
Lua
|
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
import gkedata
import gkedgbasis
from pylab import *
import pylab
import tables
import math
import numpy
import pylab
import numpy
from matplotlib import rcParams
import matplotlib.pyplot as plt
# customization for figure
# Global matplotlib style applied to every figure produced by this script.
rcParams['lines.linewidth'] = 2
rcParams['font.size'] = 18
rcParams['xtick.major.size'] = 8 # default is 4
rcParams['xtick.major.width'] = 3 # default is 0.5
rcParams['ytick.major.size'] = 8 # default is 4
rcParams['ytick.major.width'] = 3 # default is 0.5
rcParams['figure.facecolor'] = 'white'
#rcParams['figure.subplot.bottom'] = 0.125
#rcParams['figure.subplot.right'] = 0.85 # keep labels/ticks of colobar in figure
rcParams['image.interpolation'] = 'none'
rcParams['image.origin'] = 'lower'
rcParams['contour.negative_linestyle'] = 'solid'
rcParams['savefig.bbox'] = 'tight'
# Math/LaTex fonts:
# http://matplotlib.org/users/mathtext.html
# http://matplotlib.org/users/usetex.html
# Example: xlabel(r'$t \cdot l / V_{A,bc}$')
rcParams['mathtext.default'] = 'regular' # match the font used for regular text
def IE(n, nu, E):
    """Internal (thermal) energy density: half of the total energy E minus the
    bulk kinetic contribution nu**2/n (nu = momentum, n = number density)."""
    bulk_kinetic = nu ** 2 / n
    return 0.5 * (E - bulk_kinetic)
# density plot
# s5 and s6 are two BGK-Boltzmann runs; the labels below mark them as
# Kn=1/100 and Kn=1/1000 — NOTE(review): inferred from the legend strings,
# confirm against the simulation configs.
d = gkedata.GkeData("../s5/s5-bgk-boltz_numDensity_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, n2 = dg1.project(0)
d = gkedata.GkeData("../s6/s6-bgk-boltz_numDensity_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, n3 = dg1.project(0)
# Exact Euler shock solution used as the reference (dashed black line).
nEul = loadtxt("../m2/m2-euler-shock-exact-density.txt")
figure(1)
plot(Xc, n2, '-r', label='Kn=1/100')
plot(Xc, n3, '-b', label='Kn=1/1000')
plot(nEul[:,0], nEul[:,1], 'k--')
xlabel('X')
ylabel('Density')
legend(loc='best')
savefig('jets-density-cmp.png', dpi=200)

# momentum plot
d = gkedata.GkeData("../s5/s5-bgk-boltz_momentum_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, nu2 = dg1.project(0)
d = gkedata.GkeData("../s6/s6-bgk-boltz_momentum_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, nu3 = dg1.project(0)
uEul = loadtxt("../m2/m2-euler-shock-exact-velocity.txt")
figure(2)
# Velocity = momentum / density.
plot(Xc, nu2/n2, '-r', label='Kn=1/100')
plot(Xc, nu3/n3, '-b', label='Kn=1/1000')
plot(uEul[:,0], uEul[:,1], 'k--')
xlabel('X')
ylabel('Velocity')
legend(loc='best')
savefig('jets-velocity-cmp.png', dpi=200)

# internal energy plot
d = gkedata.GkeData("../s5/s5-bgk-boltz_ptclEnergy_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, E2 = dg1.project(0)
d = gkedata.GkeData("../s6/s6-bgk-boltz_ptclEnergy_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, E3 = dg1.project(0)
pEul = loadtxt("../m2/m2-euler-shock-exact-pressure.txt")
figure(3)
# Internal energy via IE(); Euler reference is p/(gamma-1) with gamma=3.
plot(Xc, IE(n2, nu2, E2), '-r', label='Kn=1/100')
plot(Xc, IE(n3, nu3, E3), '-b', label='Kn=1/1000')
plot(pEul[:,0], pEul[:,1]/(3-1), 'k--')
xlabel('X')
ylabel('Particle Energy')
legend(loc='best')
savefig('jets-ptclInternalEnergy-cmp.png', dpi=200)
figure(4)
# Total particle energy (0.5*E) vs the Euler kinetic + internal reference.
# NOTE(review): figures 3 and 4 share the same y-label — confirm intended.
plot(Xc, 0.5*E2, '-r', label='Kn=1/100')
plot(Xc, 0.5*E3, '-b', label='Kn=1/1000')
plot(pEul[:,0], 0.5*nEul[:,1]*uEul[:,1]**2+pEul[:,1]/(3-1), 'k--')
xlabel('X')
ylabel('Particle Energy')
legend(loc='best')
savefig('jets-ptclEnergy-cmp.png', dpi=200)
show()
|
[
"11265732+ammarhakim@users.noreply.github.com"
] |
11265732+ammarhakim@users.noreply.github.com
|
78568ca4885a42d8f3f9605cd773cdac043a3fda
|
27317b3adb1ccd99afa86cb931d2d14e23b9b175
|
/bcs-app/backend/apps/cluster/migrations/0011_auto_20180514_1805.py
|
7246c2cb4817e9db1fb1c09afade48a95c1a0502
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"ICU",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0",
"Zlib",
"LicenseRef-scancode-openssl",
"NAIST-2003",
"ISC",
"NTP",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"MIT"
] |
permissive
|
freyzheng/bk-bcs-saas
|
cf5a6c4ab1c20959bda1362bc31de7884451acd7
|
96373cda9d87038aceb0b4858ce89e7873c8e149
|
refs/heads/master
| 2021-07-05T04:11:08.555930
| 2020-09-22T12:26:37
| 2020-09-22T12:26:37
| 201,279,048
| 0
| 1
|
NOASSERTION
| 2020-09-16T03:07:16
| 2019-08-08T14:48:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Generated by Django 1.11.5 on 2018-05-14 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: widens the oper_type choice sets on the
    # install/update log models (adds the 'so_initial' option, among others).
    # Keep in sync with the model definitions; do not hand-edit choices here.

    dependencies = [
        ('cluster', '0010_auto_20180224_2058'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clusterinstalllog',
            name='oper_type',
            field=models.CharField(choices=[('initialize', '集群初始化'), ('reinstall', '集群重新初始化'), ('initial_check', '前置检查'), ('removing', '删除集群'), ('so_initial', 'SO 机器初始化')], default='initialize', max_length=16),
        ),
        migrations.AlterField(
            model_name='nodeupdatelog',
            name='oper_type',
            field=models.CharField(choices=[('initialize', '节点初始化'), ('reinstall', '节点重新初始化'), ('removing', '节点移除'), ('initial_check', '前置检查'), ('so_initial', 'SO 机器初始化')], default='initialize', max_length=16),
        ),
    ]
|
[
"gejun.coolfriend@gmail.com"
] |
gejun.coolfriend@gmail.com
|
96e1b7d4ba508da9f2c0883b2ba7a362efde32d5
|
b9878c92b857f73ff0452fc51c822cfc9fa4dc1c
|
/watson_machine_learning_client/libs/repo/util/base_singleton.py
|
4a9943aa7d08aaa332d2f94d4ddaafc4ea0a0726
|
[] |
no_license
|
DavidCastilloAlvarado/WMLC_mod
|
35f5d84990c59b623bfdd27369fe7461c500e0a5
|
f2673b9c77bd93c0e017831ee4994f6d9789d9a1
|
refs/heads/master
| 2022-12-08T02:54:31.000267
| 2020-09-02T15:49:21
| 2020-09-02T15:49:21
| 292,322,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
class _Singleton(type):
    """A metaclass that creates a Singleton base class when called."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # First instantiation wins; every later call returns the cached object.
        instance = cls._instances.get(cls)
        if instance is None:
            instance = super(_Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return instance


class BaseSingleton(_Singleton('SingletonMeta', (object,), {})):
    """Inherit from this class to get singleton construction semantics."""
    pass
|
[
"dcastilloa@uni.pe"
] |
dcastilloa@uni.pe
|
355a8a6a7493e09e033a44bc139d2aa4314b07e5
|
f27c49458bde84048e6008da8c52ca0f1ae711ce
|
/code/05-interactive-code/m-n-m/guessinggame.py
|
f919ba7aeb09373c7ec432efe4f22638bff7f3d2
|
[
"MIT"
] |
permissive
|
talkpython/python-for-absolute-beginners-course
|
54b0f48b5edbf7755de6ca688a8e737ba16dc2fc
|
1930dab0a91526863dc92c3e05fe3c7ec63480e1
|
refs/heads/master
| 2022-11-24T03:02:32.759177
| 2022-11-08T14:30:08
| 2022-11-08T14:30:08
| 225,979,578
| 2,287
| 1,059
|
MIT
| 2022-11-07T19:45:15
| 2019-12-05T00:02:31
|
Python
|
UTF-8
|
Python
| false
| false
| 656
|
py
|
import random

print("------------------------------")
print(" M&M guessing game!")
print("------------------------------")
print("Guess the number of M&Ms and you get lunch on the house!")
print()

# Secret count and how many guesses the player gets.
mm_count = random.randint(1, 100)
attempt_limit = 5
attempts = 0

while attempts < attempt_limit:
    guess_text = input("How many M&Ms are in the jar? ")
    # Bug fix: a non-numeric entry used to crash with ValueError; now it is
    # rejected without consuming one of the player's attempts.
    try:
        guess = int(guess_text)
    except ValueError:
        print("Please enter a whole number.")
        continue
    attempts += 1

    if mm_count == guess:
        print(f"You got a free lunch! It was {guess}.")
        break
    elif guess < mm_count:
        print("Sorry, that's too LOW!")
    else:
        print("That's too HIGH!")

print(f"Bye, you're done in {attempts}!")
|
[
"mikeckennedy@gmail.com"
] |
mikeckennedy@gmail.com
|
dce66acaecaeb92ead8da8165aa063f5144d1414
|
0c005f75771101fdea1f647f124343077af19c36
|
/test_word_break.py
|
3a437518d9123157cc2a7afd251265b23f0ac32f
|
[
"MIT"
] |
permissive
|
brigitteunger/katas
|
19ff80a43d1c8fe0e6a49a6790495e716f09f10d
|
3f9af88fe5d98753360457084741f573c863dc25
|
refs/heads/master
| 2023-01-01T00:57:45.294204
| 2020-10-13T21:01:18
| 2020-10-13T21:01:18
| 265,810,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
import unittest
from typing import List, Set
from data_word_break import s_2, wordDict_2
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        """Return True if `s` can be segmented into words from `wordDict`.

        Classic O(len(s)**2) DP: dp[i] is True when the prefix s[:i] is
        segmentable. (LeetCode 139.)
        """
        if not wordDict:
            # Bug fix: this path used to return [] from a bool-typed method.
            # With no words, only the empty string is segmentable.
            return not s
        set_words = set(wordDict)  # O(1) membership tests
        dp = [False] * (len(s) + 1)
        dp[0] = True  # the empty prefix is trivially segmentable
        for i in range(1, len(s) + 1):
            for j in range(i):
                # s[:i] works if some shorter segmentable prefix s[:j]
                # is followed by a dictionary word s[j:i].
                if dp[j] and s[j:i] in set_words:
                    dp[i] = True
                    break
        return dp[-1]
class TestFindWords(unittest.TestCase):
    """Unit tests for Solution.wordBreak (LeetCode 139 'Word Break')."""

    def setUp(self):
        self.sol = Solution()

    def testWordBreak_1(self):
        # Simple positive case: exact two-word split.
        s = "leetcode"
        word_dict = ["leet", "code"]
        segmented = self.sol.wordBreak(s, word_dict)
        self.assertTrue(segmented)

    def testWordBreak_2(self):
        # Words may be reused.
        s = "applepenapple"
        word_dict = ["apple", "pen"]
        segmented = self.sol.wordBreak(s, word_dict)
        self.assertTrue(segmented)

    def testWordBreak_3(self):
        # Classic negative case: no full segmentation exists.
        s = "catsandog"
        word_dict = ["cats", "dog", "sand", "and", "cat"]
        segmented = self.sol.wordBreak(s, word_dict)
        self.assertFalse(segmented)

    def testWordBreak_4(self):
        # Overlapping prefixes ("go"/"goal"/"goals") must not confuse the DP.
        s = "goalspecial"
        word_dict = ["go", "goal", "goals", "special"]
        segmented = self.sol.wordBreak(s, word_dict)
        self.assertTrue(segmented)

    def testWordBreak_5(self):
        # Large adversarial input imported from data_word_break.
        s = s_2
        word_dict = wordDict_2
        segmented = self.sol.wordBreak(s, word_dict)
        self.assertFalse(segmented)


if __name__ == "__main__":
    unittest.main()
|
[
"brigitteunger@users.noreply.github.com"
] |
brigitteunger@users.noreply.github.com
|
dbc05861ff1d574b59cf13ace3a9c4f734503c16
|
2f330fc050de11676ab46b963b7878882e9b6614
|
/memsource_cli/models/linguist_v2.py
|
3fded445994ae511b98da76e5f74617d09e0bc7b
|
[
"Apache-2.0"
] |
permissive
|
zerodayz/memsource-cli-client
|
609f48c18a2b6daaa639d4cb8a61da43763b5143
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
refs/heads/master
| 2020-08-01T12:43:06.497982
| 2019-09-30T11:14:13
| 2019-09-30T11:14:13
| 210,999,654
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,216
|
py
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.abstract_project_dto_v2 import AbstractProjectDtoV2 # noqa: F401,E501
from memsource_cli.models.domain_reference import DomainReference # noqa: F401,E501
from memsource_cli.models.reference_file_reference import ReferenceFileReference # noqa: F401,E501
from memsource_cli.models.sub_domain_reference import SubDomainReference # noqa: F401,E501
from memsource_cli.models.user_reference import UserReference # noqa: F401,E501
class LinguistV2(AbstractProjectDtoV2):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Empty on purpose: this model declares no fields of its own beyond
    # what AbstractProjectDtoV2 provides.
    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self):  # noqa: E501
        """LinguistV2 - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize every declared attribute (lists and dicts of
        # models included).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(LinguistV2, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LinguistV2):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"cerninr@gmail.com"
] |
cerninr@gmail.com
|
cc283b9b4b09f6bf5595826d7c51710a2bbd1948
|
b72dbc51279d3e59cb6410367b671f8a956314c1
|
/프로그래머스/그외/1844_게임맵 최단거리.py
|
0aa107ad05fc1b98b72ecda8ab28d2ebc0eba2d7
|
[] |
no_license
|
ddobokki/coding-test-practice
|
7b16d20403bb1714d97adfd1f47aa7d3ccd7ea4b
|
c88d981a1d43b986169f7884ff3ef1498e768fc8
|
refs/heads/main
| 2023-07-08T15:09:32.269059
| 2021-08-08T12:19:44
| 2021-08-08T12:19:44
| 344,116,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
#https://programmers.co.kr/learn/courses/30/lessons/1844
from collections import deque
def solution(maps):
    """Shortest path length (counting cells, start and goal inclusive) from the
    top-left to the bottom-right of a 0/1 grid, moving 4-directionally over
    1-cells only. Returns -1 when the goal is unreachable.
    (Programmers problem 1844.)
    """
    height, width = len(maps), len(maps[0])
    # dist[y][x]: cells on the shortest path found so far; -1 = unvisited,
    # which doubles as the "unreachable" sentinel for the final answer.
    dist = [[-1] * width for _ in range(height)]
    dist[0][0] = 1
    queue = deque([(0, 0)])
    moves = ((1, 0), (-1, 0), (0, 1), (0, -1))  # E, W, S, N

    while queue:
        x, y = queue.popleft()
        for dx, dy in moves:
            nx, ny = x + dx, y + dy
            # Stay on the grid, only step onto open (1) cells, never revisit.
            if 0 <= nx < width and 0 <= ny < height:
                if maps[ny][nx] == 1 and dist[ny][nx] == -1:
                    dist[ny][nx] = dist[y][x] + 1
                    queue.append((nx, ny))

    return dist[-1][-1]
#map = [[1, 0, 1, 1, 1], [1, 0, 1, 0, 1], [1, 0, 1, 1, 1], [1, 1, 1, 0, 1], [0, 0, 0, 0, 1]]
#print(solution(map))
|
[
"44228269+ddobokki@users.noreply.github.com"
] |
44228269+ddobokki@users.noreply.github.com
|
9558078b495c9f41b5bcc8fde64f93bfb7668f33
|
ec87c361be4a2f9f842695b6a6e8601ebd735e83
|
/GuessNum.py
|
ee225ea9d692c0e4fc54bd50da1e7441a632581c
|
[] |
no_license
|
heheddff/python2018
|
357d51bee7ea39f6a1df82101fb49c1568250e24
|
77a240dd155f679fffe33b62df57f689a8c85082
|
refs/heads/master
| 2020-03-27T23:13:38.789249
| 2018-12-08T14:55:21
| 2018-12-08T14:55:21
| 147,302,979
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# GuessNum: guess a random integer in [1, 1000]; only valid guesses count.
import random as rd

target = rd.randint(1, 1000)
count = 0
while True:
    try:
        # Prompt (Chinese): "enter an integer guess between 1 and 1000".
        # NOTE(review): eval() on raw user input executes arbitrary code —
        # int(input(...)) would be the safe equivalent here.
        guess = eval(input("请输入一个猜测的整数(1至1000):"))
    except:
        # Invalid input: tell the user it does not consume an attempt.
        print('输入有误,请重试,此次不计入猜测次数!')
        continue
    count +=1
    if guess > target:
        print("猜大了")  # "too high"
    elif guess < target:
        print("猜小了")  # "too low"
    else:
        print("猜对了")  # "correct"
        break
print("此论猜测的次数是:{}".format(count))
|
[
"qq2003qq@126.com"
] |
qq2003qq@126.com
|
811c54e32c59559195243cf283c1baeaf6bea67e
|
41ede4fd3bfba1bff0166bca7aee80dcf21434c6
|
/ayhanyalcinsoy/Desktop/xfce/addon/thunar-archive-plugin/actions.py
|
105c7d47862ca22da89c73e94b0087b806747df5
|
[] |
no_license
|
pisilinux/playground
|
a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c
|
e4e12fff8a847ba210befc8db7e2af8556c3adf7
|
refs/heads/master
| 2022-08-12T23:03:27.609506
| 2022-08-11T18:28:19
| 2022-08-11T18:28:19
| 8,429,459
| 16
| 22
| null | 2022-08-11T18:28:20
| 2013-02-26T09:37:11
|
Python
|
UTF-8
|
Python
| false
| false
| 683
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def setup():
    # Explicitly link against GTK2 and thunarx; the plugin's configure script
    # does not pull these in by itself.
    shelltools.export("LDFLAGS", "%s -lgtk-x11-2.0 -lthunarx-2" % get.LDFLAGS())
    autotools.configure("--disable-static \
                         --disable-dependency-tracking")

def build():
    # Plain make build.
    autotools.make()

def install():
    # Install into the package staging directory and ship the standard docs.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "NEWS", "README")
|
[
"ayhanyalcinsoy@gmail.com"
] |
ayhanyalcinsoy@gmail.com
|
e98d8dcea92717dc00bba19ef5d887b8d521e12e
|
ba949e02c0f4a7ea0395a80bdc31ed3e5f5fcd54
|
/problems/dp/Solution115.py
|
aec8a435c4f15acf75a0391afcaed2b0f9481f66
|
[
"MIT"
] |
permissive
|
akaliutau/cs-problems-python
|
6bc0a74064f6e9687fe58b13763da1fdf2e1f626
|
9b1bd8e3932be62135a38a77f955ded9a766b654
|
refs/heads/master
| 2023-05-11T22:19:06.711001
| 2021-06-04T11:14:42
| 2021-06-04T11:14:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
""" Given two strings s and t, return the number of distinct subsequences of s
which equals t. A string's subsequence is a new string formed from the
original string by deleting some (can be none) of the characters without
disturbing the relative positions of the remaining characters. (i.e., "ACE"
is a subsequence of "ABCDE" while "AEC" is not). It's guaranteed the answer
fits on a 32-bit signed integer.
Example 1: Input: s = "rabbbit", t = "rabbit" Output: 3
Explanation: As shown below, there are 3 ways you can
generate "rabbit" from S.
____ __
rabbbit
__ ____
rabbbit
___ ___
rabbbit
IDEA:
lets t=ra, s=ram
if we have a sequence
[ra]
number of distinct subsequences, layer by layer:
i=0, t="", [""] ["r"] ["ra"] ["ram"]
\
i=1, t="r", [""]<-["r"]<-["r"]<- ["r"]
i=2, t="ra", [""]<-["r"]<-["r"]<- ["r"]
where
\ = use data from previous iteration, like this
["", r, a, ra] = [{"",r} + {"",r} * a]
<- = copy prev result-set
["", r, a, ra] --> ["", r, a, ra]
"""
class Solution115:
    """LeetCode 115 — Distinct Subsequences.

    Implements the DP sketched in the module docstring above this class
    (which previously only contained `pass`).
    """

    def numDistinct(self, s: str, t: str) -> int:
        """Count the distinct subsequences of `s` that equal `t`.

        O(len(s) * len(t)) time, O(len(t)) space rolling DP where dp[j] is
        the number of ways to form t[:j] from the prefix of s seen so far.
        """
        dp = [0] * (len(t) + 1)
        dp[0] = 1  # the empty target can be formed in exactly one way
        for ch in s:
            # Walk j backwards so each character of s contributes to each
            # way at most once per iteration.
            for j in range(len(t), 0, -1):
                if t[j - 1] == ch:
                    dp[j] += dp[j - 1]
        return dp[-1]
|
[
"aliaksei.kaliutau@gmail.com"
] |
aliaksei.kaliutau@gmail.com
|
a92309f4c06a45e9fc8a12855d0fbe22d95c8feb
|
a0c53168a4bdcfb0aa917d6d2c602f0999443a10
|
/projexui/widgets/xurlwidget.py
|
40516a9749b83fb4e82b2ccb12a331191a731d1e
|
[] |
no_license
|
kanooshka/DPS_PIPELINE
|
8067154c59ca5c8c9c09740969bb6e8537021903
|
df2fcdecda5bce98e4235ffddde1e99f334562cc
|
refs/heads/master
| 2021-05-24T04:32:03.457648
| 2018-09-07T13:25:11
| 2018-09-07T13:25:11
| 29,938,064
| 3
| 2
| null | 2020-07-23T23:06:37
| 2015-01-27T22:26:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,438
|
py
|
""" Defines the XUrlWidget class """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software, LLC'
__license__ = 'LGPL'
__maintainer__ = 'Projex Software, LLC'
__email__ = 'team@projexsoftware.com'
import webbrowser
from projexui import qt
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QWidget,\
QHBoxLayout,\
QIcon,\
QToolButton
from projexui.widgets.xlineedit import XLineEdit
from projexui import resources
class XUrlWidget(QWidget):
    """Composite widget: a hinted line edit for a URL plus a tool button that
    opens the entered address in the system web browser."""

    urlChanged = qt.Signal(str)  # re-emitted from the edit's textChanged
    urlEdited = qt.Signal()      # re-emitted from the edit's textEdited

    def __init__( self, parent ):
        super(XUrlWidget, self).__init__(parent)

        # define the interface
        self._urlEdit = XLineEdit(self)

        self._urlButton = QToolButton(self)
        self._urlButton.setAutoRaise(True)
        self._urlButton.setIcon(QIcon(resources.find('img/web.png')))
        self._urlButton.setToolTip('Browse Link')
        # NoFocus keeps tabbing on the line edit itself.
        self._urlButton.setFocusPolicy(Qt.NoFocus)

        self._urlEdit.setHint('http://')

        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        layout.addWidget(self._urlEdit)
        layout.addWidget(self._urlButton)

        self.setLayout(layout)
        self.setFocusPolicy(Qt.StrongFocus)

        # create connections
        self._urlEdit.textChanged.connect(self.urlChanged)
        self._urlEdit.textEdited.connect(self.urlEdited)
        self._urlButton.clicked.connect(self.browse)

    def blockSignals( self, state ):
        """
        Blocks the signals for this widget and its sub-parts.

        :param      state | <bool>
        """
        super(XUrlWidget, self).blockSignals(state)
        self._urlEdit.blockSignals(state)
        self._urlButton.blockSignals(state)

    def browse( self ):
        """
        Brings up a web browser with the address in a Google map.
        """
        webbrowser.open(self.url())

    def hint( self ):
        """
        Returns the hint associated with this widget.

        :return     <str>
        """
        return self._urlEdit.hint()

    def lineEdit( self ):
        """
        Returns the line edit linked with this widget.

        :return     <XLineEdit>
        """
        return self._urlEdit

    def setFocus(self):
        """
        Sets the focus for this widget on its line edit.
        """
        self._urlEdit.setFocus()

    @qt.Slot(str)
    def setHint( self, hint ):
        """
        Sets the hint associated with this widget.

        :param      hint | <str>
        """
        self._urlEdit.setHint(hint)

    @qt.Slot(str)
    def setUrl( self, url ):
        """
        Sets the url for this widget to the inputed url.

        :param      url | <str>
        """
        self._urlEdit.setText(str(url))

    def url( self ):
        """
        Returns the current url from the edit.

        :return     <str>
        """
        return str(self._urlEdit.text())

    # Designer-exposed properties backed by the accessors above.
    x_hint = qt.Property(str, hint, setHint)
    x_url = qt.Property(str, url, setUrl)

# Exported so Qt Designer's plugin loader picks this widget up.
__designer_plugins__ = [XUrlWidget]
|
[
"kanooshka@gmail.com"
] |
kanooshka@gmail.com
|
1401a17efdbfb7b2ff484178a6944d5e373dd1f7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03488/s626861342.py
|
65eae1c60abd7285830f846f3f83e1f7681f124f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
s=input()
X,Y=map(int,input().split())
from collections import defaultdict
def solve(da, dp, G):
    """From the set of starting coordinates `dp`, apply every step length in
    `da` as either a +step or a -step move along one axis; return whether the
    goal coordinate G is reachable."""
    reachable = set(dp)
    for step in da:
        # Each step branches every reachable position into two.
        reachable = {pos + delta for pos in reachable for delta in (-step, step)}
    return G in reachable
"""amax=0
for i in a:
amax+=abs(i)
if amax==0:
if G==0:
return True
else:
return False
if a[0]==G:
return True
dp=[[0]*2*amax for _ in range(3)]
dp[0][a[0]]=1
for i in range(1,len(a)):
p=a[i]
for j in range(-amax,amax):
dp[i%3][j-p]+=dp[(i-1)%3][j]
dp[i%3][j+p]+=dp[(i-1)%3][j]
#print(dp)
if dp[i%3][G]>=1:
return True
return False"""
"""#print(a)
dp=set()
dp.add(a[0])
dp2=set()
for i in range(1,len(a)):
for j in dp:
dp2.add(j-a[i])
dp2.add(j+a[i])
dp=copy.deepcopy(dp2)
#print(dp)
if G in dp:
return True
return False"""
# Split the command string on 'T' (turn): each run of 'F's becomes a segment
# length. Segments alternate axes; the very first one is forced along x.
d=[len(x) for x in s.split("T")]
dx=d[2::2]  # later x-axis segments (sign free)
dy=d[1::2]  # y-axis segments (sign free)
# x starts offset by the forced first segment d[0]; y starts at 0.
if solve(dx,{d[0]},X) and solve(dy,{0},Y):
    print("Yes")
else:
    print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e87293f0dbca65cf9e8eb987d30bb7c5e8ed590e
|
29b1b15e4fef90717ff7bf8b13ab9a23cdc17c51
|
/finalproduct/testapp/migrations/0003_comments.py
|
24bc341b3f011285e4bdec6c57d8b347120c9b42
|
[] |
no_license
|
deepawalekedar319/DjangoProjects
|
93fe59812593a1e1b8f542c8c5b1642bc95f6da4
|
1780b703a3022ea17dc188ad98b0f17bb14fa12f
|
refs/heads/main
| 2023-09-03T04:48:21.201822
| 2021-11-08T05:28:00
| 2021-11-08T05:28:00
| 425,706,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-31 13:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django: adds the Comments model, tied to a Post via
    # the 'comments' reverse accessor, newest-first ordering.

    dependencies = [
        ('testapp', '0002_auto_20201027_1439'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('active', models.BooleanField(default=True)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='testapp.Post')),
            ],
            options={
                'ordering': ('-created',),
            },
        ),
    ]
|
[
"deepawalekedar319@gmail.com"
] |
deepawalekedar319@gmail.com
|
5631077333222d3476b68d4a173ce9d25f7396be
|
caf8cbcafd448a301997770165b323438d119f5e
|
/.history/mercari/mercari_search_20201124184144.py
|
21031f8d0dc5f3408e41cd1680d9f13d31082892
|
[
"MIT"
] |
permissive
|
KustomApe/nerdape
|
03e0691f675f13ce2aefa46ee230111247e90c72
|
aef6fb2d1f8c364b26d91bf8570b4487a24de69a
|
refs/heads/main
| 2023-01-23T10:13:26.584386
| 2020-11-28T22:29:49
| 2020-11-28T22:29:49
| 309,897,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import PyQt5
import time

"""[Initial Settings]
初期設定
"""
options = webdriver.ChromeOptions()
# Fix: the flag was misspelled '--headeless', so headless mode was ignored.
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')

"""[CSS Selector Settings]
CSSセレクターの設定
"""
# "next page" element of the search-result pager.
# (A stray, unquoted selector line that was a syntax error has been removed.)
PAGER = "li.pager-next"

word = input("検索したいキーワードを入力してください:")

# Accumulators live OUTSIDE the loop; previously they were re-created on
# every page, discarding all earlier results.
df_main = pd.DataFrame(columns=['在庫有無', 'タイトル', '値段', 'URL'])
df_graf = pd.DataFrame(columns=['SOLD', 'PRICE'])

# Fix: browser.get() returns None; the old code fed that None back into
# browser.get(). Load the first results page once, then follow pager links.
n = 1
browser.get("https://www.mercari.com/jp/search/?page=" + str(n) + "&keyword=" + word)

while True:
    item_boxlist = browser.find_elements_by_css_selector(".items-box")
    if not item_boxlist:
        print('No items anymore...')
        break
    for item_box in item_boxlist:
        try:
            # Sold-out items carry a badge element.
            if len(item_box.find_elements_by_css_selector(".item-sold-out-badge")) > 0:
                sold = "SOLD"
            else:
                sold = "NOT SOLD"
            sub_title = item_box.find_element_by_class_name("items-box-body")
            title = sub_title.find_element_by_tag_name("h3").text
            item_price = item_box.find_element_by_css_selector(".items-box-price")
            price_text = item_price.text
            # Strip thousands separators and the leading yen sign.
            price_text = re.sub(r",", "", price_text).lstrip("¥ ")
            price_text_int = int(price_text)
            print(price_text_int)
            url = item_box.find_element_by_tag_name("a").get_attribute("href")
            data = pd.Series([sold, title, price_text_int, url], index=df_main.columns)
            grdata = pd.Series([sold, price_text_int], index=df_graf.columns)
            df_main = df_main.append(data, ignore_index=True)
            df_graf = df_graf.append(grdata, ignore_index=True)
        except Exception as e:
            print(e)
    # Follow the pager's anchor; stop cleanly when there is no next page
    # (the old unguarded find_element raised NoSuchElementException).
    pager_links = browser.find_elements_by_css_selector(PAGER + " a")
    if not pager_links:
        print('No items anymore...')
        break
    btn = pager_links[0].get_attribute('href')
    n += 1
    print('next url:{}'.format(btn))
    time.sleep(3)
    browser.get(btn)
    print('Moving to next page...')

print(df_main)
sns.stripplot(x='SOLD', y='PRICE', data=df_graf)
plt.show()
sns.pairplot(df_graf, hue="SOLD")
plt.show()
print('Writing out to CSV file...')
df_main.to_csv("pricedata.csv", encoding="utf_8_sig")
print("Done")
|
[
"kustomape@gmail.com"
] |
kustomape@gmail.com
|
64cfaf128c32f6121b1d5cd6194329ba27f2532b
|
c0792645c156cb9e20a1aa2b28c565150358bc6e
|
/apps/inmueble/migrations/0007_auto_20180413_2159.py
|
67c3d2b9cf04f9cc969b9db636a4659a8eea6221
|
[] |
no_license
|
clioo/Praver
|
b22fd92886e0399845adb4366663cae6a7d7853b
|
523f0d78e0a2039a5bae3e539c93e2c2415a0840
|
refs/heads/master
| 2020-03-11T12:38:54.272392
| 2018-06-28T18:24:21
| 2018-06-28T18:24:21
| 130,003,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2018-04-14 03:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax ``Inmueble.latitud``/``longitud`` to allow blank values.

    Auto-generated by Django 1.9.6.
    """

    dependencies = [
        ('inmueble', '0006_imagenesinmbueble'),
    ]

    operations = [
        migrations.AlterField(
            model_name='inmueble',
            name='latitud',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='inmueble',
            name='longitud',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
[
"jesus_acosta1996@hotmail.com"
] |
jesus_acosta1996@hotmail.com
|
9df488729a33c40b7f0a79805a0e490939c392cc
|
de06c4a1fb02fd23eadfc58c770d87edfd0a6d38
|
/group_buying/payment/migrations/0002_auto_20200829_0923.py
|
da6c064bd2bf89bfec813229cb49073801216a4d
|
[] |
no_license
|
saisantoshchirag/group_buying
|
c12dc0bf1882cf03d20e6865dd98105a28907f80
|
1d9fb28f99dfb9b085e43bb5429bde476680ffa7
|
refs/heads/master
| 2023-08-15T01:27:56.203321
| 2020-11-13T11:16:36
| 2020-11-13T11:16:36
| 267,057,651
| 0
| 1
| null | 2021-09-22T19:21:23
| 2020-05-26T13:58:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
# Generated by Django 2.1.5 on 2020-08-29 03:53
from django.db import migrations
class Migration(migrations.Migration):
    """Drop denormalised customer/checkout columns from ``payment.Orders``.

    Auto-generated by Django 2.1.5; removes address, city, email,
    items_json, name, phone, state and zip_code.
    """

    dependencies = [
        ('payment', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='orders',
            name='address',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='city',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='email',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='items_json',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='name',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='phone',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='state',
        ),
        migrations.RemoveField(
            model_name='orders',
            name='zip_code',
        ),
    ]
|
[
"saisantosh.c17@iiits.in"
] |
saisantosh.c17@iiits.in
|
538215aa0cc6b8084fff013b4fd1dac21131423c
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/80_best.py
|
c500e3032fd796de2b2a3073cdc4baa3dbdbb67f
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
class Solution(object):
    def removeDuplicates(self, nums):
        """
        Compact sorted `nums` in place so every value appears at most
        twice, returning the new logical length (LeetCode 80).

        :type nums: List[int]
        :rtype: int
        """
        write = 0
        for value in nums:
            # Keep `value` unless the two most recently kept entries are
            # already equal to it (input is sorted, so that means this
            # would be a third-or-later occurrence).
            if write < 2 or value != nums[write - 2]:
                nums[write] = value
                write += 1
        return write
# Quick smoke test of the solution on the LeetCode sample input.
solver = Solution()
sample = [1, 1, 1, 2, 2, 3]
print(solver.removeDuplicates(sample))
|
[
"noelsun@mowennaierdeMacBook-Pro.local"
] |
noelsun@mowennaierdeMacBook-Pro.local
|
ce9a504baf33919b24dc53bdf46a87dc45cd164e
|
fa45fe7eaba7ef7c27ecf95db7c460ca189ce0d4
|
/everydays/day002/flask_test/hm_07_helloflask.py
|
c5e844f1d49376ac75384f887e29197ae23fd8cb
|
[] |
no_license
|
jake20001/Hello
|
be1a2bb5331f2ad4c1d8f30c6a9a530aff79e605
|
08217871bb17152eb09e68cd154937ebe5d59d2c
|
refs/heads/master
| 2021-07-10T09:48:15.883716
| 2021-04-23T14:49:03
| 2021-04-23T14:49:03
| 56,282,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
from datetime import timedelta
from flask import Flask, session,jsonify
app = Flask(__name__)
# The secret key signs the session cookie so clients cannot tamper with it.
app.secret_key = 'test'
# Session lifetime applies to *permanent* sessions; Flask's default is 31 days.
print(f'默认过期时间: {app.permanent_session_lifetime}')
# Assign a timedelta to shorten the lifetime (here: 20 seconds) for testing.
app.permanent_session_lifetime = timedelta(days=0,seconds=20)
print(f'测试过期时间: {app.permanent_session_lifetime}')
@app.route('/session')
def get_session():
    # session is a dict-like object; dump its contents as JSON for inspection.
    print(session)
    return jsonify({key: value for key, value in session.items()})
@app.route('/session/set')
def set_session():
    """Write demo values into the session.

    Fix: ``permanent_session_lifetime`` (configured above as 20 seconds)
    only applies to sessions marked permanent. Without
    ``session.permanent = True`` the cookie is a browser-session cookie
    and the configured expiry is silently ignored.
    """
    session.permanent = True
    # session is dict-like; item assignment records the data in the cookie.
    session['username'] = 'zhangsan'
    session['age'] = 100
    return "set session"
@app.route('/session/delete')
def delete_session():
    # Remove the key with `del`; membership is checked first because
    # deleting a missing key would raise KeyError.
    if 'username' in session:
        del session['username']
    return "delete session"
if __name__ == '__main__':
    # Listen on all interfaces; debug mode enables the reloader/debugger.
    app.run(host='0.0.0.0', port=5000, debug=True)
|
[
"jianke.zhang@beantechs.com"
] |
jianke.zhang@beantechs.com
|
c9979f423a456cb880b77c2b8a584ec0c5691070
|
b007d88e6726452ffa8fe80300614f311ae5b318
|
/educative.io/coding_patterns/hash_maps/isomorphic_string.py
|
3f2177702a7f146a99345b2c40f7a05c9cd83761
|
[] |
no_license
|
jinurajan/Datastructures
|
ec332b12b8395f42cb769e771da3642f25ba7e7f
|
647fea5d2c8122468a1c018c6829b1c08717d86a
|
refs/heads/master
| 2023-07-06T14:42:55.168795
| 2023-07-04T13:23:22
| 2023-07-04T13:23:22
| 76,943,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
"""
Given two strings, check whether two strings are isomorphic to each other or not. Two strings are isomorphic if a fixed mapping exists from the characters of one string to the characters of the other string. For example, if there are two instances of the character "a" in the first string, both these instances should be converted to another character (which could also remain the same character if "a" is mapped to itself) in the second string. This converted character should remain the same in both positions of the second string since there is a fixed mapping from the character "a" in the first string to the converted character in the second string.
"""
def is_isomorphic(string1, string2):
    """Return True if the strings are isomorphic.

    Two strings are isomorphic when a one-to-one character mapping turns
    string1 into string2 (each character maps consistently in both
    directions).

    Fix: the original file defined this function twice with identical
    signatures; the second definition silently shadowed the first. The
    single implementation below keeps the clearer ``zip``-based form.

    Args:
        string1: First string.
        string2: Second string.

    Returns:
        bool: True when a bijective per-character mapping exists.
    """
    if len(string1) != len(string2):
        return False
    forward = {}   # string1 char -> string2 char
    backward = {}  # string2 char -> string1 char
    for char_1, char_2 in zip(string1, string2):
        # setdefault records the mapping on first sight and returns the
        # previously recorded target otherwise; a mismatch breaks isomorphism.
        if forward.setdefault(char_1, char_2) != char_2:
            return False
        if backward.setdefault(char_2, char_1) != char_1:
            return False
    return True
|
[
"jinu.p.r@gmail.com"
] |
jinu.p.r@gmail.com
|
dc4ba9522892d2b29251cd8eab33b73c5fffbcf8
|
2d2c10ffa7aa5ee35393371e7f8c13b4fab94446
|
/projects/ai/mrc/haihua/mrc_guwen/loss.py
|
b36544f5e4359d2393243aba18e0a179e657b745
|
[] |
no_license
|
faker2081/pikachu2
|
bec83750a5ff3c7b5a26662000517df0f608c1c1
|
4f06d47c7bf79eb4e5a22648e088b3296dad3b2d
|
refs/heads/main
| 2023-09-02T00:28:41.723277
| 2021-11-17T11:15:44
| 2021-11-17T11:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file loss.py
# \author chenghuige
# \date 2021-01-09 17:51:33.472128
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import tensorflow as tf
import melt as mt
from .config import *
def loss_fn(y_true, y_pred, x, model):
  """Per-example binary cross-entropy (from logits), reduced with melt.

  `x` and `model` are accepted for interface compatibility with the
  training loop but are not used here.
  """
  logits = tf.cast(y_pred, tf.float32)
  bce = tf.keras.losses.BinaryCrossentropy(
      from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
  per_example = bce(y_true, logits)
  # mt.reduce_over: melt helper reducing the un-reduced per-example losses
  # (presumably averaging across the distributed batch — confirm in melt).
  return mt.reduce_over(per_example)
def get_loss(model=None):
  """Return the loss callable used for training.

  Args:
    model: Object exposing a ``get_loss()`` method.

  Returns:
    The callable produced by ``model.get_loss()``. The module-level
    ``loss_fn`` above is kept as a reference implementation.
  """
  # Dead commented-out FLAGS.custom_loss fallback removed: the model
  # always supplies its own loss here.
  return model.get_loss()
|
[
"chenghuige@gmail.com"
] |
chenghuige@gmail.com
|
b95feeca262a9036432d30423ce62dd23cffdd32
|
415fcefe59c8d33bc3f8b0784d48a7509ea7d5da
|
/addanother_example/models.py
|
f005bc5f0ebd821cc308a8ef2e021933eecd6f68
|
[] |
no_license
|
asifpy/django-quickstart
|
6f517699375015584a7d17f112b70b8eeff89762
|
0ff625915cf169d3fb2f9646d9838260629c1576
|
refs/heads/master
| 2021-01-11T11:19:22.446634
| 2017-05-04T05:28:55
| 2017-05-04T05:28:55
| 72,719,312
| 2
| 1
| null | 2017-05-04T05:28:56
| 2016-11-03T07:24:32
|
Python
|
UTF-8
|
Python
| false
| false
| 795
|
py
|
from django.db import models
class Team(models.Model):
    # Display name; also used as the human-readable representation below.
    name = models.CharField(max_length=20)

    def __str__(self):
        return self.name
class Player(models.Model):
    """Demo model with several kinds of Team relations.

    Fix: ``ForeignKey`` requires an explicit ``on_delete`` from Django 2.0;
    ``models.CASCADE`` is spelled out here, matching the implicit default
    of older Django versions, so behavior is unchanged.
    """
    name = models.CharField(max_length=20)
    current_team = models.ForeignKey(
        "Team", related_name="current_players",
        on_delete=models.CASCADE,
        help_text='This demonstrates the wrapper adding an "add" button only'
    )
    future_team = models.ForeignKey(
        "Team", related_name="future_players",
        on_delete=models.CASCADE,
        help_text='This demonstrates the wrapper adding both an "add" and an "edit" button'
    )
    previous_teams = models.ManyToManyField(
        "Team", related_name="ancient_players",
        help_text="This demonstrates the wrapper on a ManyToMany field"
    )

    def __str__(self):
        return self.name
|
[
"saluasif@gmail.com"
] |
saluasif@gmail.com
|
a511646d6604a9c524b484c4ff7546e7ca14116e
|
bc167f434158921bcf2c678155c5cdfec1c9b0c9
|
/PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav457.py
|
245f22a2fc444ac2254832c1c88ff8828465938b
|
[] |
no_license
|
s0217391/DifferentProjects
|
6450efc89c64ecd21b86c705737e89e5c69433a6
|
7f4da153660817b6cbf72d2e823aa29c0c2f95a9
|
refs/heads/master
| 2021-01-17T02:58:46.219240
| 2015-05-26T22:45:46
| 2015-05-26T22:45:46
| 34,995,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
#!/usr/bin/python
import sys
def compute(prey):
    """Machine-generated prey-behaviour expression tree.

    NOTE(review): this function was produced by an evolutionary behaviour
    generator; the intermediate assignments are not human-designed and
    most early temporaries are overwritten before use. The only values
    that reach the return are prey[1] and temp5 (prey[1] / (prey[1] +
    temp3) guarded against division by zero). Do not "simplify" without
    re-running the simulator: statement order defines the result.

    `prey` is indexed at [0] and [1] only — presumably an (x, y) position
    pair; confirm against the simulator's caller.
    """
    temp0 = -1 * prey[1]
    if temp0 != 0:
        temp1 = temp0 / temp0
    else:
        temp1 = temp0
    temp0 = -1 * prey[1]
    temp1 = temp0 * prey[0]
    if temp0 != 0:
        temp2 = temp1 % temp0
    else:
        temp2 = temp0
    temp3 = temp1 + prey[1]
    if temp2 > temp0:
        if temp3 != 0:
            temp3 = temp3 % temp3
        else:
            temp3 = temp3
    else:
        # Unreachable duplicate test (temp2 > temp0 already false here);
        # kept verbatim because the code is generated.
        if temp2 > temp0:
            if temp3 != 0:
                temp3 = prey[0] % temp3
            else:
                temp3 = temp3
        else:
            temp3 = temp3 * prey[0]
    if temp3 != 0:
        temp1 = temp1 / temp3
    else:
        temp1 = temp3
    if prey[1] > temp3:
        temp1 = temp2 * temp2
    else:
        temp1 = prey[1] + prey[1]
    if temp0 != 0:
        temp1 = prey[1] / temp0
    else:
        temp1 = temp0
    temp0 = prey[0] + temp0
    temp2 = prey[0] + temp3
    temp4 = -1 * prey[1]
    if temp3 != 0:
        temp0 = temp1 % temp3
    else:
        temp0 = temp3
    temp4 = prey[0] + temp2
    temp3 = prey[1] + temp3
    temp1 = max(prey[1], temp3)
    temp2 = temp2 + prey[1]
    if temp1 > prey[1]:
        if prey[0] > prey[0]:
            temp0 = -1 * temp1
        else:
            temp0 = temp1 + prey[0]
    else:
        if prey[1] != 0:
            temp0 = temp0 / prey[1]
        else:
            temp0 = prey[1]
    if temp3 != 0:
        temp5 = prey[1] / temp3
    else:
        temp5 = temp3
    return [prey[1], temp5]
|
[
"i7674211@bournemouth.ac.uk"
] |
i7674211@bournemouth.ac.uk
|
a5c866848db0a2d103e4eccf93def3588d598874
|
f20da8440bae10fe73900f787fc7781f23196325
|
/downsample/downsample_dense.py
|
ad5654289ac0181edcac53448c9e825628577396
|
[] |
no_license
|
ramesh720/recipe_zs2017_track2_phoneme
|
9c5cdb3066a84e5059153b1390802e700c66978e
|
f8bbd9b8e6ae4f542e52c2582eab1cf166923226
|
refs/heads/master
| 2020-04-29T11:07:47.406768
| 2018-01-13T13:03:46
| 2018-01-13T13:03:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,364
|
py
|
#!/usr/bin/env python
"""
Perform dense downsampling over indicated segmentation intervals.
Author: Herman Kamper
Contact: kamperh@gmail.com
Date: 2015-2017
"""
from datetime import datetime
from os import path
import argparse
import cPickle as pickle
import numpy as np
import scipy.signal as signal
import sys
OUTPUT_DIR = "embeddings"
#-----------------------------------------------------------------------------#
# UTILITY FUNCTIONS #
#-----------------------------------------------------------------------------#
def check_argv():
    """Parse and validate the command-line arguments."""
    parser = argparse.ArgumentParser(
        description=__doc__.strip().split("\n")[0], add_help=False)
    parser.add_argument(
        "lang", type=str,
        choices=["english", "french", "mandarin", "LANG1", "LANG2"])
    parser.add_argument("subset", type=str, choices=["train"])  # , "test"])
    parser.add_argument(
        "landmarks", type=str, choices=["unsup_syl"], help="landmarks set")
    parser.add_argument(
        "feature_type", type=str, help="input feature type",
        choices=["mfcc", "okko0"])
    parser.add_argument(
        "--n", type=int, default=10,
        help="number of samples (default: %(default)s)")
    parser.add_argument(
        "--frame_dims", type=int, default=None,
        help="only keep these number of dimensions")
    parser.add_argument(
        "--n_landmarks_max", type=int, default=6,
        help="maximum number of landmarks to cross (default: %(default)s)")
    # With no arguments at all, show usage and exit non-zero.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def downsample_utterance(features, seglist, n):
    """
    Embed every (start, end) segment in `seglist` as one fixed-length row.

    Each segment of `features` (frames x dims) is resampled along time to
    exactly `n` frames and flattened row-major; the result has one row per
    segment.
    """
    embeddings = [
        signal.resample(features[start:end + 1, :].T, n, axis=1).flatten("C")
        for start, end in seglist
    ]
    return np.asarray(embeddings)
#-----------------------------------------------------------------------------#
# MAIN FUNCTION #
#-----------------------------------------------------------------------------#
def main():
    """Load features and segment lists, downsample, and write embeddings.

    NOTE(review): this script targets Python 2 (``cPickle`` import,
    ``dict.keys()[0]`` indexing) — it will not run unmodified on Python 3.
    """
    args = check_argv()

    # Resolve the input feature archive for the chosen feature type.
    if args.feature_type == "mfcc":
        input_npz_fn = path.join(
            "..", "features", "mfcc", args.lang + "_" + args.subset, "numpy", "mfcc.cmvn_dd.npz"
            )
    elif args.feature_type == "okko0":
        input_npz_fn = path.join(
            "..", "features", "okko0", args.lang + "_" + args.subset, "segments.npz"
            )
    else:
        assert False
    print("Reading: " + input_npz_fn)
    input_npz = np.load(input_npz_fn)
    # Frame dimensionality taken from an arbitrary utterance (Py2-only indexing).
    d_frame = input_npz[input_npz.keys()[0]].shape[1]
    print("No. of utterances: " + str(len(input_npz.keys())))
    seglist_pickle_fn = path.join(
        OUTPUT_DIR, args.lang + "_" + args.subset, "seglist." + args.landmarks
        + ".n_max_" + str(args.n_landmarks_max) + ".pkl"
        )
    print("Reading: " + seglist_pickle_fn)
    with open(seglist_pickle_fn, "rb") as f:
        seglist_dict = pickle.load(f)
    print("No. of utterances: " + str(len(seglist_dict)))
    print("Frame dimensionality: " + str(d_frame))
    if args.frame_dims is not None and args.frame_dims < d_frame:
        d_frame = args.frame_dims
        print("Reducing frame dimensionality: " + str(d_frame))
    print("No. of samples: " + str(args.n))
    print(datetime.now())
    print("Downsampling")
    downsample_dict = {}
    # One embedding matrix per utterance; dims are truncated before resampling.
    for i, utt in enumerate(input_npz.keys()):
        downsample_dict[utt] = downsample_utterance(
            input_npz[utt][:, :args.frame_dims], seglist_dict[utt], args.n
            )
    print(datetime.now())
    output_npz_fn = path.join(
        OUTPUT_DIR, args.lang + "_" + args.subset, "downsample_dense." + args.feature_type +
        ".n_" + str(args.n) + ".n_max_" + str(args.n_landmarks_max) + "." + args.landmarks + ".npz"
        )
    print("Writing: " + output_npz_fn)
    np.savez_compressed(output_npz_fn, **downsample_dict)
if __name__ == "__main__":
    main()
|
[
"kamperh@gmail.com"
] |
kamperh@gmail.com
|
8425cd0230586cba7d321dc4706f57f721a3c5d4
|
b246bdb4ae3d845bbf8dee704b8936c32211c0f5
|
/Figure_1/initial_subtyping/do_tsne.py
|
fe059a8bd9f38cb0c2f026356b59d034111066fc
|
[] |
no_license
|
KnottLab/bladder-snSeq
|
abfd3d77a04250622e6a28d84878e5adcd335d00
|
2e087dc745046e30c2814ab3e4c295bfa34e6820
|
refs/heads/master
| 2023-04-07T13:36:44.794889
| 2021-12-08T15:37:45
| 2021-12-08T15:37:45
| 323,445,511
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,966
|
py
|
#!/usr/bin/env python
import numpy as np
import argparse
from load_data import load_data
from MulticoreTSNE import MulticoreTSNE as TSNE
try:
import cuml
CUML_FLAG=True
except:
print('[DO_TSNE] WARNING failed to import cuML. GPU accelerated TSNE will not be available.')
CUML_FLAG=False
"""
Modules have two modes: standalone from command line and pipelined
Both modes accept a preprocessed AnnData object as input.
Standalone mode writes back a AnnData with new metadata
Pipelined mode returns the AnnData object with new metadata
UMAPs with /umap-learn/cuML GPU-accelerated UMAP implementation
https://umap-learn.readthedocs.io/en/latest/
https://github.com/lmcinnes/umap
"""
def do_tsne(adata, ARGS):
  """Compute a tSNE embedding of adata.obsm[ARGS.latent_key].

  Fixes: removed a leftover, unused ``umap_class = cuml.UMAP`` assignment
  (copy/paste residue from a UMAP script) and corrected the "requres"
  typo in the warning message.

  Args:
    adata: AnnData object with latent features in ``obsm``.
    ARGS: argparse namespace (see ``__main__``) with tSNE parameters.

  Returns:
    The same AnnData, with the embedding stored in
    ``adata.obsm[ARGS.tsne_key]`` and the arguments in
    ``adata.uns['tSNE_args']``.
  """
  latent = adata.obsm[ARGS.latent_key]

  if ARGS.gpu and CUML_FLAG:
    print('[DO_TSNE] Using cuML GPU-accelerated TSNE')
    if ARGS.metric != 'euclidean':
      # cuML's TSNE only supports euclidean distances; warn and proceed.
      print('[DO_TSNE] cuML TSNE requires euclidean distance metric.')
    emb = cuml.TSNE(
      perplexity = ARGS.perplexity,
      learning_rate = ARGS.learning_rate,
      early_exaggeration = ARGS.early_exaggeration,
    ).fit_transform(latent)
  else:
    print('[DO_TSNE] Using MulticoreTSNE')
    emb = TSNE( perplexity = ARGS.perplexity,
                metric = ARGS.metric,
                verbose = False, n_jobs=ARGS.n_jobs).fit_transform(latent)

  print(f'[DO_TSNE] placing embedding {emb.shape} in key {ARGS.tsne_key}')
  adata.obsm[ARGS.tsne_key] = emb

  print(f'[DO_TSNE] recording tSNE args')
  adata.uns['tSNE_args'] = ARGS.__dict__
  return adata
if __name__ == '__main__':
  # Standalone mode: parse args, load the AnnData, embed, optionally save.
  parser = argparse.ArgumentParser()
  parser.add_argument('dataset', type=str)
  parser.add_argument('--latent_key', default='X_scVI_vanilla', type=str,
                      help = 'Key in adata.obsm to use as features for tsne.')
  parser.add_argument('--tsne_key', default='X_scVI_tsne_vanilla', type=str,
                      help = 'Key in adata.obsm to save tsne embedding.')
  parser.add_argument('--gpu', action='store_true',
                      help = 'Whether to use GPU-accelerated tsne via RapidsAI \
                              and the cuML library. ')
  parser.add_argument('-j', '--n_jobs', default=12, type=int,
                      help = 'Number of jobs for MulticoreTSNE')
  parser.add_argument('--perplexity', default=20, type=int)
  parser.add_argument('--learning_rate', default=200., type=float)
  parser.add_argument('--n_iter', default=1000, type=int)
  parser.add_argument('--metric', default='euclidean', type=str)
  parser.add_argument('--early_exaggeration', default=12, type=float)
  parser.add_argument('--output_adata', default=None, type=str,
                      help = 'Path to save.')
  ARGS = parser.parse_args()

  adata = load_data(ARGS.dataset)
  adata = do_tsne(adata, ARGS)

  # Only write back when an output path was requested.
  if ARGS.output_adata is not None:
    print(f'[DO_TSNE] Writing to {ARGS.output_adata}')
    adata.write(ARGS.output_adata)
|
[
"ing.nathany@gmail.com"
] |
ing.nathany@gmail.com
|
14e1a228d0680642f41d17ebeaa1552a75c5e0c5
|
1aa0ddb70fb893a6f958841b0a606cdcac954e18
|
/settings/forms/batches.py
|
a1931328efe9b17262c46d238776331d3278fa66
|
[] |
no_license
|
shitalluitel/LibraryManagementSystem
|
3042860a70096bf3821299fb10ca35958e680f62
|
eecd909b272ad7e524a031c9142d22a356141fda
|
refs/heads/master
| 2023-02-17T06:42:19.044516
| 2021-01-10T14:52:18
| 2021-01-10T14:52:18
| 166,533,846
| 2
| 1
| null | 2023-02-07T22:14:35
| 2019-01-19T10:22:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
from django import forms
from django.forms import ModelMultipleChoiceField
from settings.models import Batch, CourseBatch, Course
class BatchForm(forms.ModelForm):
    """ModelForm for creating/editing a Batch (name + code)."""
    class Meta:
        model = Batch
        fields = ['name', 'code']
        # Bootstrap styling on both inputs.
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'code': forms.TextInput(attrs={'class': 'form-control'}),
        }
        labels = {
            'name': 'Batch Name',
            'code': 'Batch Code',
        }
class CourseBatchCreateForm(forms.Form):
    """Form for attaching one or more courses to a batch.

    Fixes: removed commented-out dead code in ``__init__`` and the
    copy/paste leftover ``empty_label`` text ("Choose a countries").
    """
    course = forms.ModelMultipleChoiceField(
        queryset=Course.objects.all(),
        label="Choose courses for this batch."
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['course'].widget.attrs['class'] = 'form-control'
        # NOTE(review): empty_label is forced to None on
        # ModelMultipleChoiceField, so this assignment has no visible
        # effect on a multi-select widget — kept for compatibility, with
        # the wording corrected from the unrelated "countries" text.
        self.fields['course'].empty_label = "Choose a course"
|
[
"ctalluitel@gmail.com"
] |
ctalluitel@gmail.com
|
5f01b00fb146fec0d23c878194633081590499e0
|
59de7788673ade984b9c9fbc33664a7cbdba67d3
|
/res/scripts/client/gui/scaleform/framework/entities/abstract/tooltipmgrmeta.py
|
a6dfedb52bce9035a795727e60fc365f096a4dbc
|
[] |
no_license
|
webiumsk/WOT-0.9.15-CT
|
3fa24ab37a6c91b7073034afb2f355efa5b7fe36
|
fbd194fbaa6bdece51c7a68fc35bbb5257948341
|
refs/heads/master
| 2020-12-24T21:27:23.175774
| 2016-05-01T13:47:44
| 2016-05-01T13:47:44
| 57,600,180
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,329
|
py
|
# 2016.05.01 15:22:59 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/framework/entities/abstract/ToolTipMgrMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIModule import BaseDAAPIModule
class ToolTipMgrMeta(BaseDAAPIModule):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends BaseDAAPIModule
    null
    """

    def onCreateComplexTooltip(self, tooltipId, stateType):
        """
        Flash -> Python callback stub; a concrete manager must override it,
        otherwise the base class logs an override error.

        :param tooltipId:
        :param stateType:
        :return :
        """
        self._printOverrideError('onCreateComplexTooltip')

    def onCreateTypedTooltip(self, type, args, stateType):
        """
        Flash -> Python callback stub; a concrete manager must override it.
        (`type` shadows the builtin — name fixed by the code generator.)

        :param type:
        :param args:
        :param stateType:
        :return :
        """
        self._printOverrideError('onCreateTypedTooltip')

    def as_showS(self, tooltipData, linkage):
        """
        Python -> Flash call: show the tooltip when the Flash side is ready;
        returns None if DAAPI is not initialised yet.

        :param tooltipData:
        :param linkage:
        :return :
        """
        if self._isDAAPIInited():
            return self.flashObject.as_show(tooltipData, linkage)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\framework\entities\abstract\tooltipmgrmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:22:59 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
85d95bdfbd59a153f246c62bca01d14bff2342be
|
8382f4ec907950a8cfc618d3cceb97b0d00ab478
|
/6kyu/encryptThis.py
|
98249bc9ece7063bffc8fcf98db0cc716a54aaba
|
[] |
no_license
|
naistangz/codewars_challenges
|
80788f3869a4283c89ee2a05f19142b18ba4820c
|
372bbb6f1668b378183a169206526b52315107a8
|
refs/heads/master
| 2023-04-14T11:52:31.412554
| 2021-04-25T09:39:03
| 2021-04-25T09:39:03
| 299,615,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
def encrypt_this(text):
    """Encrypt `text` word by word.

    For each word: the first character becomes its ASCII code, and the
    second and last characters are swapped. Empty words (from repeated
    spaces) are dropped from the output.
    """
    pieces = []
    for word in text.split(" "):
        if not word:
            continue
        if len(word) < 3:
            # Nothing distinct to swap: just encode the first character.
            pieces.append(str(ord(word[0])) + word[1:])
        else:
            pieces.append(
                str(ord(word[0])) + word[-1] + word[2:-1] + word[1])
    return " ".join(pieces)
|
[
"a6anaistang@hotmail.co.uk"
] |
a6anaistang@hotmail.co.uk
|
7c8e4675d0711026385f5328533e7c8eeb8fad4d
|
56db1ccba3f8976b2df6d97c99e5aae7108149a1
|
/spending/main/admin.py
|
2c410651c1a51abbb5f05621793ae519229eae80
|
[] |
no_license
|
peterbe/django-spending
|
4d60b7a77250fc58eb7a397e388fd22fe73576de
|
ab2ab1730fbdd999e5ef8d75575795fa3a48d2b9
|
refs/heads/master
| 2021-01-10T05:32:00.005607
| 2013-07-06T05:41:41
| 2013-07-06T05:41:41
| 8,384,613
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from django.contrib import admin
from spending.main.models import Household, Expense, Category
class HouseholdAdmin(admin.ModelAdmin):
    # Household name plus a computed member count column.
    list_display = ('name', 'no_users')

    def no_users(self, obj):
        # Computed column: number of users attached to this household.
        return obj.users.all().count()
    no_users.short_description = '# users'


class ExpenseAdmin(admin.ModelAdmin):
    list_display = ('amount', 'date', 'user', 'category')


class CategoryAdmin(admin.ModelAdmin):
    list_display = ('name',)


admin.site.register(Household, HouseholdAdmin)
admin.site.register(Expense, ExpenseAdmin)
admin.site.register(Category, CategoryAdmin)
|
[
"mail@peterbe.com"
] |
mail@peterbe.com
|
11ef2cc4fb52774a2fb7d480df6720fc9c79afd9
|
9b20743ec6cd28d749a4323dcbadb1a0cffb281b
|
/02_Statistical_Methods_for_Machine_Learning/14/01_tolerance.py
|
4a81741857f5d7f81dd597a2d99ba09c2f2bae3b
|
[] |
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178
|
ee265f1c6029c91daff172b3e7c1a96177646bc5
|
refs/heads/master
| 2023-03-07T19:30:26.691659
| 2021-02-19T08:00:49
| 2021-02-19T08:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
# Parametric (Gaussian) tolerance interval for a random sample.
from numpy import mean
from numpy import sqrt
from numpy.random import randn
from numpy.random import seed
from scipy.stats import chi2
from scipy.stats import norm
# Seed the RNG so the sampled data is reproducible.
seed(1)
# 100 draws from N(50, 5^2).
data = 5 * randn(100) + 50
# Degrees of freedom for the chi-squared quantile.
n = len(data)
dof = n - 1
# Two-sided population coverage of 95%; the lower-tail quantile is
# negative, but it is squared in the tolerance formula below.
prop = 0.95
prop_inv = (1.0 - prop) / 2.0
gauss_critical = norm.ppf(prop_inv)
print('Gaussian critical value: %.3f (coverage=%d%%)' % (gauss_critical, prop * 100))
# 99% confidence in achieving that coverage.
prob = 0.99
prop_inv = 1.0 - prob
chi_critical = chi2.ppf(prop_inv, dof)
print('Chi-Squared critical value: %.3f (prob=%d%%, dof=%d)' % (chi_critical, prob * 100, dof))
# Howe's tolerance-factor formula: k = sqrt(dof * (1 + 1/n) * z^2 / chi2).
interval = sqrt((dof * (1 + (1 / n)) * gauss_critical ** 2) / chi_critical)
print('Tolerance Interval: %.3f' % interval)
# Report the interval centred on the sample mean.
data_mean = mean(data)
lower, upper = data_mean - interval, data_mean + interval
print('%.2f to %.2f covers %d%% of data with a confidence of %d%%' % (lower, upper, prop * 100, prob * 100))
|
[
"jgrimes@jgrimes.tech"
] |
jgrimes@jgrimes.tech
|
4fdc6b0d3c0d6e664d22960a9926a3b2127f2f29
|
753a70bc416e8dced2853f278b08ef60cdb3c768
|
/models/research/deeplab/datasets/build_cityscapes_data.py
|
ce81baef20a460abaa634d3f1dcb6760a0858dec
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
finnickniu/tensorflow_object_detection_tflite
|
ef94158e5350613590641880cb3c1062f7dd0efb
|
a115d918f6894a69586174653172be0b5d1de952
|
refs/heads/master
| 2023-04-06T04:59:24.985923
| 2022-09-20T16:29:08
| 2022-09-20T16:29:08
| 230,891,552
| 60
| 19
|
MIT
| 2023-03-25T00:31:18
| 2019-12-30T09:58:41
|
C++
|
UTF-8
|
Python
| false
| false
| 6,244
|
py
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Cityscapes data to TFRecord file format with Example protos.
The Cityscapes dataset is expected to have the following directory structure:
+ cityscapes
- build_cityscapes_data.py (current working directiory).
- build_data.py
+ cityscapesscripts
+ annotation
+ evaluation
+ helpers
+ preparation
+ viewer
+ gtFine
+ train
+ val
+ test
+ leftImg8bit
+ train
+ val
+ test
+ tfrecord
This script converts data into sharded data files and save at tfrecord folder.
Note that before running this script, the users should (1) register the
Cityscapes dataset website at https://www.cityscapes-dataset.com to
download the dataset, and (2) run the script provided by Cityscapes
`preparation/createTrainIdLabelImgs.py` to generate the training groundtruth.
Also note that the tensorflow model will be trained with `TrainId' instead
of `EvalId' used on the evaluation server. Thus, the users need to convert
the predicted labels to `EvalId` for evaluation on the server. See the
vis.py for more details.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import math
import os.path
import re
import sys
import build_data
from six.moves import range
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('cityscapes_root',
                           './cityscapes',
                           'Cityscapes dataset root folder.')

tf.app.flags.DEFINE_string(
    'output_dir',
    './tfrecord',
    'Path to save converted SSTable of TensorFlow examples.')

# Number of TFRecord shards written per dataset split.
_NUM_SHARDS = 10

# A map from data type to folder name that saves the data.
_FOLDERS_MAP = {
    'image': 'leftImg8bit',
    'label': 'gtFine',
}

# A map from data type to filename postfix.
_POSTFIX_MAP = {
    'image': '_leftImg8bit',
    'label': '_gtFine_labelTrainIds',
}

# A map from data type to data format.
_DATA_FORMAT_MAP = {
    'image': 'png',
    'label': 'png',
}

# Image file pattern (captures the stem before the image postfix).
_IMAGE_FILENAME_RE = re.compile('(.+)' + _POSTFIX_MAP['image'])
def _get_files(data, dataset_split):
  """Collect the sorted file paths for one data type and dataset split.

  Args:
    data: String, desired data ('image' or 'label').
    dataset_split: String, dataset split ('train', 'val', 'test').

  Returns:
    A sorted list of matching file names, or None when asking for labels
    of the test set (which has no groundtruth).
  """
  if data == 'label' and dataset_split == 'test':
    return None
  pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data])
  glob_pattern = os.path.join(
      FLAGS.cityscapes_root, _FOLDERS_MAP[data], dataset_split, '*', pattern)
  matches = glob.glob(glob_pattern)
  matches.sort()
  return matches
def _convert_dataset(dataset_split):
  """Converts the specified dataset split to TFRecord format.

  Args:
    dataset_split: The dataset split (e.g., train, val).

  Raises:
    RuntimeError: If loaded image and label have different shape, or if the
      image file with specified postfix could not be found.
  """
  image_files = _get_files('image', dataset_split)
  label_files = _get_files('label', dataset_split)
  num_images = len(image_files)
  # Spread the examples evenly across _NUM_SHARDS output files.
  num_per_shard = int(math.ceil(num_images / _NUM_SHARDS))
  image_reader = build_data.ImageReader('png', channels=3)
  label_reader = build_data.ImageReader('png', channels=1)
  for shard_id in range(_NUM_SHARDS):
    shard_filename = '%s-%05d-of-%05d.tfrecord' % (
        dataset_split, shard_id, _NUM_SHARDS)
    output_filename = os.path.join(FLAGS.output_dir, shard_filename)
    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
      start_idx = shard_id * num_per_shard
      end_idx = min((shard_id + 1) * num_per_shard, num_images)
      for i in range(start_idx, end_idx):
        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
            i + 1, num_images, shard_id))
        sys.stdout.flush()
        # Read the image.
        image_data = tf.gfile.FastGFile(image_files[i], 'rb').read()
        height, width = image_reader.read_image_dims(image_data)
        # Read the semantic segmentation annotation.
        seg_data = tf.gfile.FastGFile(label_files[i], 'rb').read()
        seg_height, seg_width = label_reader.read_image_dims(seg_data)
        # Image and label must agree pixel-for-pixel.
        if height != seg_height or width != seg_width:
          raise RuntimeError('Shape mismatched between image and label.')
        # Convert to tf example; the regex strips the '_leftImg8bit'
        # postfix to recover the stem shared with the label file.
        re_match = _IMAGE_FILENAME_RE.search(image_files[i])
        if re_match is None:
          raise RuntimeError('Invalid image filename: ' + image_files[i])
        filename = os.path.basename(re_match.group(1))
        example = build_data.image_seg_to_tfexample(
            image_data, filename, height, width, seg_data)
        tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
def main(unused_argv):
  """Convert the supported dataset splits to TFRecord files."""
  # Only 'train' and 'val' are supported for now (test has no labels).
  for split in ('train', 'val'):
    _convert_dataset(split)
if __name__ == '__main__':
tf.app.run()
|
[
"finn.niu@apptech.com.hk"
] |
finn.niu@apptech.com.hk
|
36427016924bc734286ed9ff39b3812b2d38b21a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2367/60699/251530.py
|
e61d0d3cb0da03640cd9f10d895c7a604b12880b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
# Precompute the repunits 1, 11, 111, ... up to 31 digits.
res1 = 1
list1 = [1]
for i in range(0, 30):
    res1 = res1 * 10 + 1
    list1.append(res1)

n = int(input())
# Print the smallest precomputed repunit divisible by n, or -1 when none
# exists.  The original fell through and printed -1 unconditionally, even
# right after printing a successful match; `for/else` fixes that.
for i in list1:
    if i % n == 0:
        print(i)
        break
else:
    print(-1)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
47e5b33bf2c46dffa3df76a2bf4134619041815a
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Reconstruction/RecExample/RecExCommon/share/ContainerRemapping.py
|
00f5c31d568f606d42fba403b8779c3df62a656f
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,782
|
py
|
# Athena jobOptions idiom: mark this fragment so it is only included once.
include.block ("RecExCommon/ContainerRemapping.py")

from AthenaCommon.AppMgr import ServiceMgr

# Instantiate the address remapping service:
if not hasattr( ServiceMgr, "AddressRemappingSvc" ):
    ServiceMgr += CfgMgr.AddressRemappingSvc()
    pass
if not hasattr( ServiceMgr, "ProxyProviderSvc" ):
    ServiceMgr += CfgMgr.ProxyProviderSvc()
    pass

# Register the remapper as a proxy provider so lookups by old names are
# transparently redirected to the new container names.
ServiceMgr.ProxyProviderSvc.ProviderNames += [ "AddressRemappingSvc" ]
# Declare the name conversion rules:
# Each entry maps an old "<type>#<key>" to its new "<type>#<key>" so that
# files written with the legacy container names can still be read.
ServiceMgr.AddressRemappingSvc.TypeKeyOverwriteMaps += [
    # -- egamma (electrons / photons) collections --
    "xAOD::ElectronContainer#ElectronCollection->"
    "xAOD::ElectronContainer#Electrons",
    "xAOD::ElectronAuxContainer#ElectronCollectionAux.->"
    "xAOD::ElectronAuxContainer#ElectronsAux.",
    "xAOD::ElectronContainer#FwdElectrons->"
    "xAOD::ElectronContainer#ForwardElectrons",
    "xAOD::ElectronAuxContainer#FwdElectronsAux.->"
    "xAOD::ElectronAuxContainer#ForwardElectronsAux.",
    "xAOD::PhotonContainer#PhotonCollection->"
    "xAOD::PhotonContainer#Photons",
    "xAOD::PhotonAuxContainer#PhotonCollectionAux.->"
    "xAOD::PhotonAuxContainer#PhotonsAux.",
    # -- calorimeter clusters used by egamma --
    "xAOD::CaloClusterContainer#egClusterCollection->"
    "xAOD::CaloClusterContainer#egammaClusters",
    "xAOD::CaloClusterAuxContainer#egClusterCollectionAux.->"
    "xAOD::CaloClusterAuxContainer#egammaClustersAux.",
    "xAOD::CaloClusterContainer#LArClusterEMFrwd->"
    "xAOD::CaloClusterContainer#ForwardElectronClusters",
    "xAOD::CaloClusterAuxContainer#LArClusterEMFrwdAux.->"
    "xAOD::CaloClusterAuxContainer#ForwardElectronClustersAux.",
    # -- inner-detector track particles --
    "xAOD::TrackParticleContainer#InDetTrackParticlesForward->"
    "xAOD::TrackParticleContainer#InDetForwardTrackParticles",
    "xAOD::TrackParticleAuxContainer#InDetTrackParticlesForwardAux.->"
    "xAOD::TrackParticleAuxContainer#InDetForwardTrackParticlesAux.",
    "xAOD::TrackParticleContainer#InDetTrackParticlesLowBeta->"
    "xAOD::TrackParticleContainer#InDetLowBetaTrackParticles",
    "xAOD::TrackParticleAuxContainer#InDetTrackParticlesLowBetaAux.->"
    "xAOD::TrackParticleAuxContainer#InDetLowBetaTrackParticlesAux.",
    # -- tau reconstruction --
    "xAOD::TauJetContainer#TauRecContainer->"
    "xAOD::TauJetContainer#TauJets",
    "xAOD::TauJetAuxContainer#TauRecContainerAux.->"
    "xAOD::TauJetAuxContainer#TauJetsAux.",
    "xAOD::CaloClusterContainer#TauPi0ClusterContainer->"
    "xAOD::CaloClusterContainer#TauPi0Clusters",
    "xAOD::CaloClusterAuxContainer#TauPi0ClusterContainerAux.->"
    "xAOD::CaloClusterAuxContainer#TauPi0ClustersAux.",
    "xAOD::VertexContainer#TauSecondaryVertexContainer->"
    "xAOD::VertexContainer#TauSecondaryVertices",
    "xAOD::VertexAuxContainer#TauSecondaryVertexContainerAux.->"
    "xAOD::VertexAuxContainer#TauSecondaryVerticesAux.",
    # -- particle-flow objects (tau and jet/ETmiss) --
    "xAOD::PFOContainer#TauShotPFOContainer->"
    "xAOD::PFOContainer#TauShotParticleFlowObjects",
    "xAOD::PFOAuxContainer#TauShotPFOContainerAux.->"
    "xAOD::PFOAuxContainer#TauShotParticleFlowObjectsAux.",
    "xAOD::PFOContainer#TauPi0ChargedPFOContainer->"
    "xAOD::PFOContainer#TauChargedParticleFlowObjects",
    "xAOD::PFOAuxContainer#TauPi0ChargedPFOContainerAux.->"
    "xAOD::PFOAuxContainer#TauChargedParticleFlowObjectsAux.",
    "xAOD::PFOContainer#TauPi0NeutralPFOContainer->"
    "xAOD::PFOContainer#TauNeutralParticleFlowObjects",
    "xAOD::PFOAuxContainer#TauPi0NeutralPFOContainerAux.->"
    "xAOD::PFOAuxContainer#TauNeutralParticleFlowObjectsAux.",
    "xAOD::PFOContainer#chargedJetETMissPFO_eflowRec->"
    "xAOD::PFOContainer#JetETMissChargedParticleFlowObjects",
    "xAOD::PFOAuxContainer#chargedJetETMissPFO_eflowRecAux.->"
    "xAOD::PFOAuxContainer#JetETMissChargedParticleFlowObjectsAux.",
    "xAOD::PFOContainer#neutralJetETMissPFO_eflowRec->"
    "xAOD::PFOContainer#JetETMissNeutralParticleFlowObjects",
    "xAOD::PFOAuxContainer#neutralJetETMissPFO_eflowRecAux.->"
    "xAOD::PFOAuxContainer#JetETMissNeutralParticleFlowObjectsAux.",
    # -- topological calorimeter clusters --
    "xAOD::CaloClusterContainer#CaloCalTopoCluster->"
    "xAOD::CaloClusterContainer#CaloCalTopoClusters",
    "xAOD::CaloClusterAuxContainer#CaloCalTopoClusterAux.->"
    "xAOD::CaloClusterAuxContainer#CaloCalTopoClustersAux.",
    # -- Monte Carlo truth record --
    "xAOD::TruthEventContainer#TruthEvent->"
    "xAOD::TruthEventContainer#TruthEvents",
    "xAOD::TruthEventAuxContainer#TruthEventAux.->"
    "xAOD::TruthEventAuxContainer#TruthEventsAux.",
    "xAOD::TruthParticleContainer#TruthParticle->"
    "xAOD::TruthParticleContainer#TruthParticles",
    "xAOD::TruthParticleAuxContainer#TruthParticleAux.->"
    "xAOD::TruthParticleAuxContainer#TruthParticlesAux.",
    "xAOD::TruthVertexContainer#TruthVertex->"
    "xAOD::TruthVertexContainer#TruthVertices",
    "xAOD::TruthVertexAuxContainer#TruthVertexAux.->"
    "xAOD::TruthVertexAuxContainer#TruthVerticesAux."
    ]
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
70b13e09918671ec8f42febe6f91674c2a84f798
|
d4f2e2e3552ab4b111f78cfbad0d30c144201093
|
/2016-12-20/semaphore.py
|
2c1f4492110d89b3a3f1daa84001456b57596e8d
|
[
"Apache-2.0"
] |
permissive
|
dongweiming/mp
|
c1e9f6f2c1fd8adbd4d7b8ffc45c5cc288cdcd80
|
129c31c818e1f0c39c983aad1f2f1ad9fa7efb1c
|
refs/heads/master
| 2023-04-29T07:56:27.198574
| 2022-10-30T04:20:09
| 2022-10-30T04:21:27
| 75,051,758
| 96
| 35
|
Apache-2.0
| 2023-04-17T17:34:17
| 2016-11-29T06:44:53
|
Python
|
UTF-8
|
Python
| false
| false
| 509
|
py
|
import aiohttp
import asyncio
# Query ids to fetch and the endpoint template they are substituted into.
NUMBERS = range(12)
URL = 'http://httpbin.org/get?a={}'
# Allow at most 3 requests to be in flight at the same time.
sema = asyncio.Semaphore(3)
async def fetch_async(a):
    # GET the URL for id `a` and echo back the 'a' query argument as
    # parsed from httpbin's JSON response.
    async with aiohttp.request('GET', URL.format(a)) as r:
        data = await r.json()
        return data['args']['a']
async def print_result(a):
    """Fetch id `a` under the shared semaphore and print the result."""
    # `with (await sema)` was deprecated in Python 3.7/3.8 and removed in
    # later releases; `async with` is the supported way to hold an
    # asyncio.Semaphore.
    async with sema:
        r = await fetch_async(a)
        print('fetch({}) = {}'.format(a, r))
loop = asyncio.get_event_loop()
# asyncio.wait() no longer accepts bare coroutine objects (deprecated in
# 3.8, removed in 3.11); gather schedules them explicitly and keeps the
# same concurrent fan-out behavior.
f = asyncio.gather(*[print_result(num) for num in NUMBERS])
loop.run_until_complete(f)
|
[
"ciici1234@hotmail.com"
] |
ciici1234@hotmail.com
|
b097b7a2e91b91ea67969ca245e6a9c69ad4bc7f
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Selenium_Chromium/source/selenium/webdriver/edge/service.py
|
9eac51171035f1d2bd648ca409aeee7b8c69b782
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class Service(service.Service):

    def __init__(self, executable_path, port=0, verbose=False, log_path=None):
        """Create a new instance of the EdgeDriver service.

        EdgeDriver provides an interface for Microsoft WebDriver to use
        with Microsoft Edge.

        :param executable_path: Path to the Microsoft WebDriver binary.
        :param port: Run the remote service on a specified port.
         Defaults to 0, which binds to a random open port of the
         system's choosing.
        :verbose: Whether to make the webdriver more verbose (passes the
         --verbose option to the binary). Defaults to False.
        :param log_path: Optional path for the webdriver binary to log to.
         Defaults to None which disables logging.
        """
        self.service_args = ["--verbose"] if verbose else []

        kwargs = {
            "executable": executable_path,
            "port": port,
            "start_error_message": ("Please download from "
                                    "http://go.microsoft.com/fwlink/?LinkId=619687"),
        }
        if log_path:
            # Open in append mode so consecutive sessions share one log.
            kwargs["log_file"] = open(log_path, "a+")
        service.Service.__init__(self, **kwargs)

    def command_line_args(self):
        """Return the CLI arguments used to launch the driver binary."""
        return ["--port=%d" % self.port] + self.service_args
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
fe0e6cff95e5d8a330eff9257815093428fb3c63
|
43ab33b2f50e47f5dbe322daa03c86a99e5ee77c
|
/test/test_od_mcomplex_type_definition_range_check.py
|
1654fc81fa59cb3e96dcfdc2ece04a4d325049a1
|
[] |
no_license
|
Sage-Bionetworks/rcc-client
|
c770432de2d2950e00f7c7bd2bac22f3a81c2061
|
57c4a621aecd3a2f3f9faaa94f53b2727992a01a
|
refs/heads/main
| 2023-02-23T05:55:39.279352
| 2021-01-21T02:06:08
| 2021-01-21T02:06:08
| 331,486,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,610
|
py
|
# coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import rcc
from rcc.models.od_mcomplex_type_definition_range_check import ODMcomplexTypeDefinitionRangeCheck # noqa: E501
from rcc.rest import ApiException
class TestODMcomplexTypeDefinitionRangeCheck(unittest.TestCase):
    """ODMcomplexTypeDefinitionRangeCheck unit test stubs"""
    # Auto-generated smoke test (openapi-generator): it only verifies that
    # the model can be constructed with and without optional fields.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test ODMcomplexTypeDefinitionRangeCheck
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = rcc.models.od_mcomplex_type_definition_range_check.ODMcomplexTypeDefinitionRangeCheck()  # noqa: E501
        if include_optional :
            # Every field is populated with placeholder '0' values; the
            # goal is coverage of the constructor, not realistic data.
            return ODMcomplexTypeDefinitionRangeCheck(
                check_value = [
                    rcc.models.od_mcomplex_type_definition_check_value.ODMcomplexTypeDefinitionCheckValue(
                        value = '0', )
                    ],
                formal_expression = [
                    rcc.models.od_mcomplex_type_definition_formal_expression.ODMcomplexTypeDefinitionFormalExpression(
                        value = '0',
                        context = '0', )
                    ],
                measurement_unit_ref = rcc.models.od_mcomplex_type_definition_measurement_unit_ref.ODMcomplexTypeDefinitionMeasurementUnitRef(
                    measurement_unit_oid = '0', ),
                error_message = rcc.models.od_mcomplex_type_definition_error_message.ODMcomplexTypeDefinitionErrorMessage(
                    translated_text = [
                        rcc.models.od_mcomplex_type_definition_translated_text.ODMcomplexTypeDefinitionTranslatedText(
                            value = '0',
                            lang = '0', )
                        ], ),
                comparator = 'LT',
                soft_hard = 'SOFT'
            )
        else :
            return ODMcomplexTypeDefinitionRangeCheck(
        )

    def testODMcomplexTypeDefinitionRangeCheck(self):
        """Test ODMcomplexTypeDefinitionRangeCheck"""
        # Both construction paths must succeed without raising.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"thomas.yu@sagebase.org"
] |
thomas.yu@sagebase.org
|
9057b02d2ebad2cfc59b5649da5d1eeb5780b432
|
8d5f49fa1fda8ffc473e7f5a62786c77838a5820
|
/website/drawquest/dbrouters.py
|
23bbfb4be240253be8526040cf768de593b23d88
|
[
"BSD-3-Clause"
] |
permissive
|
MichaelBechHansen/drawquest-web
|
dfc6f5d9541860a5df23db678e82564a230bd42e
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
refs/heads/master
| 2021-01-14T10:30:10.861222
| 2015-11-10T03:13:42
| 2015-11-10T03:13:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
from django.conf import settings
class DatabaseAppRouter(object):
    """
    A router to control all database operations on models for different
    databases.

    In case an app is not set in settings.DATABASE_APPS_MAPPING, the router
    will fallback to the `default` database.

    Settings example:

    DATABASE_APPS_MAPPING = {'app1': 'db1', 'app2': 'db2'}
    """

    def db_for_read(self, model, **hints):
        """Point all read operations to the app's mapped database."""
        # Returning None lets Django fall through to the default database.
        return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label)

    def db_for_write(self, model, **hints):
        """Point all write operations to the app's mapped database."""
        return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label)

    def allow_relation(self, obj1, obj2, **hints):
        """Allow any relation between apps that use the same database."""
        db_obj1 = settings.DATABASE_APPS_MAPPING.get(obj1._meta.app_label)
        db_obj2 = settings.DATABASE_APPS_MAPPING.get(obj2._meta.app_label)
        if db_obj1 and db_obj2:
            # Same database -> True, different databases -> False.
            return db_obj1 == db_obj2
        # Unmapped app(s): abstain (None) so other routers can decide.
        return None

    def allow_syncdb(self, db, model):
        """Make sure that apps only appear in their mapped database."""
        if model._meta.app_label == 'south':
            # South's migration bookkeeping tables live in every database.
            return True
        elif db in settings.DATABASE_APPS_MAPPING.values():
            return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label) == db
        elif model._meta.app_label in settings.DATABASE_APPS_MAPPING:
            # Bug fix: the original used dict.has_key(), which was removed
            # in Python 3 and raised AttributeError; `in` is equivalent.
            return False
        elif db != 'default':
            return False
        return True
|
[
"alex.ehlke@gmail.com"
] |
alex.ehlke@gmail.com
|
444afd65d83f521bbd49a2443f13fc3fbfceb654
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03078/s480863669.py
|
ac933d8bc820956754a8b02303270586b6a2aaa3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
# solution: print the k largest sums a[p] + b[q] + c[r].
x, y, z, k = map(int, input().split())
top_a = sorted(map(int, input().split()), reverse=True)
top_b = sorted(map(int, input().split()), reverse=True)
top_c = sorted(map(int, input().split()), reverse=True)

# With each list sorted descending, a combination can only be among the k
# largest sums when the product of its 1-based ranks is at most k, so all
# other combinations are skipped.
candidates = [
    top_a[p] + top_b[q] + top_c[r]
    for p in range(min(k, len(top_a)))
    for q in range(min(k, len(top_b)))
    for r in range(min(k, len(top_c)))
    if (p + 1) * (q + 1) * (r + 1) <= k
]
ordered = sorted(candidates, reverse=True)
for idx in range(k):
    print(ordered[idx])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3a074572647edca905c1104c2e82709c859ebddb
|
4050f786f3cc505760e25608d66805e3543835f8
|
/the_flyer_15147/urls.py
|
141a25667c75334ebfabf7887b5c99cfe55f3ff9
|
[] |
no_license
|
crowdbotics-apps/the-flyer-15147
|
6fb0a403286d06c5393d9f58b39f76ad5c538312
|
e2f62327110f1200c8d4ebf46f127ce4fe903189
|
refs/heads/master
| 2022-12-11T02:03:31.153849
| 2020-03-28T02:01:50
| 2020-03-28T02:01:50
| 250,693,069
| 0
| 0
| null | 2022-12-08T05:09:49
| 2020-03-28T01:59:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
"""the_flyer_15147 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routing table; order matters — Django matches top to bottom.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("event.api.v1.urls")),
    path("event/", include("event.urls")),
    # NOTE(review): "home.urls" is also mounted at the root path above, so
    # the same views are reachable under both "/" and "/home/".
    path("home/", include("home.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "the flyer"
admin.site.site_title = "the flyer Admin Portal"
admin.site.index_title = "the flyer Admin"

# swagger
# drf_yasg schema view backing the interactive API documentation page.
schema_view = get_schema_view(
    openapi.Info(
        title="the flyer API",
        default_version="v1",
        description="API documentation for the flyer App",
    ),
    public=True,
    # API docs are only visible to authenticated users.
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
1e445cc5bd290315f961eb98d248e02c72584909
|
f4653b4bd7528150a53c8f454658c00d7ea0b836
|
/cbm/ipycbm/ipy_view/view_main.py
|
e19d0de98e84ac96843c63b82e5adf468f855f50
|
[
"BSD-3-Clause"
] |
permissive
|
mokasini/cbm
|
ccb09cb8ab96e6b06b0e13d86ff51124538706f6
|
33bd9c8a0d107f6cdc3343953ae9f7c9bd9272cd
|
refs/heads/main
| 2023-02-24T04:44:07.744715
| 2021-02-01T12:29:38
| 2021-02-01T12:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import Tab
from cbm.ipycbm.utils import help_docs
from cbm.ipycbm.ipy_view import view_settings, view_panel
def view_widget_box():
    """Build the tabbed 'view' widget.

    Returns:
        An ipywidgets Tab holding the data-view, help and settings panels.
        If building the view panel fails, falls back to a two-tab widget
        with only help and settings and prints the error.
    """
    try:
        tab_box = Tab(children=[view_panel.view(), help_docs.widget_box(),
                                view_settings.widget_box()])
        tab_box.set_title(0, 'View Data')
        tab_box.set_title(1, 'Help')
        tab_box.set_title(2, 'Settings')
    except Exception as err:
        tab_box = Tab(children=[help_docs.widget_box(),
                                view_settings.widget_box()])
        # Bug fix: the fallback Tab has only two children, so the titles
        # belong at indices 0 and 1 (the original labelled 1 and 2,
        # leaving the first tab untitled).
        tab_box.set_title(0, 'Help')
        tab_box.set_title(1, 'Settings')
        print("Could not show 'View panel'.", err)
    return tab_box
|
[
"Konstantinos.ANASTASAKIS@ext.ec.europa.eu"
] |
Konstantinos.ANASTASAKIS@ext.ec.europa.eu
|
27c35318c6b5f8212dd449e282c2b081d6dc4c61
|
046c1141399890afa13fd243e55da3dbf31085c5
|
/test/test22.py
|
05c20e2d7892acce138d4df0ab6d184be9b7d49e
|
[] |
no_license
|
carusyte/tflab
|
1d0edf87282352aeb5a38b83c58ab9c0189bbb1a
|
2324c3b0ad22d28c50a4fd8db56e36a2836735c3
|
refs/heads/master
| 2021-05-12T06:58:26.270868
| 2019-03-24T14:57:44
| 2019-03-24T14:57:44
| 117,232,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,165
|
py
|
from __future__ import print_function
# Path hack.
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import tensorflow as tf
from pstk.model import model11
from time import strftime
from pstk.data import data as data0
from pstk.data import data12
from test import collect_summary
import os
import numpy as np
import math
# Training schedule and network topology hyper-parameters.
EPOCH_SIZE = 444
RNN_LAYERS = 1
FCN_LAYERS = 3
LAYER_WIDTH = 256
# Maximum sequence length fed to the RNN.
MAX_STEP = 50
TIME_SHIFT = 9
# Dropout rate is e/10 ~= 0.272 — a deliberate constant, not a typo.
DROP_OUT = math.e / 10.0
LEARNING_RATE = 1e-3
LOG_DIR = 'logdir'
def run():
    """Train DRnnPredictorV6 for EPOCH_SIZE epochs of 50 batches each,
    evaluating on the test set before every epoch and checkpointing after
    each one. Summaries, checkpoints and worst-prediction records are
    written under LOG_DIR.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    loader = data12.DataLoader(TIME_SHIFT)
    print('{} loading test data...'.format(strftime("%H:%M:%S")))
    tuuids, tdata, tlabels, tseqlen = loader.loadTestSet(MAX_STEP)
    print(tdata.shape)
    print(tlabels.shape)
    featSize = tdata.shape[2]
    nclass = tlabels.shape[1]
    # Class labels are centered around zero, e.g. 5 classes -> [-2..2].
    classes = [i-nclass//2 for i in range(nclass)]
    data = tf.placeholder(tf.float32, [None, MAX_STEP, featSize], "input")
    target = tf.placeholder(tf.float32, [None, nclass], "labels")
    seqlen = tf.placeholder(tf.int32, [None], "seqlen")
    dropout = tf.placeholder(tf.float32, [], name="dropout")
    training = tf.placeholder(tf.bool, [], name="training")
    with tf.Session() as sess:
        model = model11.DRnnPredictorV6(
            data=data,
            target=target,
            seqlen=seqlen,
            classes=classes,
            rnn_layers=RNN_LAYERS,
            fcn_layers=FCN_LAYERS,
            layer_width=LAYER_WIDTH,
            dropout=dropout,
            training=training,
            learning_rate=LEARNING_RATE)
        stime = '{}'.format(strftime("%Y-%m-%d %H:%M:%S"))
        model_name = model.getName()
        # Derive the log directory name from this script's file name.
        f = __file__
        fbase = f[f.rfind('/')+1:f.rindex('.py')]
        base_dir = '{}/{}_{}/{}'.format(LOG_DIR, fbase,
                                        model_name, strftime("%Y%m%d_%H%M%S"))
        print('{} using model: {}'.format(strftime("%H:%M:%S"), model_name))
        if tf.gfile.Exists(base_dir):
            tf.gfile.DeleteRecursively(base_dir)
        tf.gfile.MakeDirs(base_dir)
        # Isolate the variables stored behind the scenes by the metric operation
        # so precision/recall accumulators can be reset per batch/epoch.
        metric_local_vars = tf.get_collection(
            tf.GraphKeys.LOCAL_VARIABLES, scope="Precisions") + tf.get_collection(
            tf.GraphKeys.LOCAL_VARIABLES, scope="Recalls")
        metric_vars_initializer = tf.variables_initializer(
            var_list=metric_local_vars)
        sess.run(tf.group(tf.global_variables_initializer(),
                          metric_vars_initializer))
        summary, train_writer, test_writer = collect_summary(
            sess, model, base_dir)
        saver = tf.train.Saver()
        bno = 0
        for epoch in range(EPOCH_SIZE):
            # Global batch counter: 50 training batches per epoch.
            bno = epoch*50
            print('{} running on test set...'.format(strftime("%H:%M:%S")))
            feeds = {data: tdata, target: tlabels,
                     seqlen: tseqlen, dropout: 0, training: False}
            # Extra metric ops are fetched only for their side effects;
            # [:3] keeps accuracy, worst and the summary string.
            accuracy, worst, test_summary_str = sess.run(
                [model.accuracy, model.worst, summary, model.precisions[1], model.recalls[1], model.f_score], feeds)[:3]
            bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
            print('{} Epoch {} test accuracy {:3.3f}% max_entropy {:3.4f} predict {} actual {} uuid {}'.format(
                strftime("%H:%M:%S"), epoch, 100. * accuracy, max_entropy, predict, actual, tuuids[bidx]))
            data0.save_worst_rec(model_name, stime, "test", epoch,
                                 tuuids[bidx], max_entropy, predict, actual)
            summary_str = None
            for i in range(50):
                # Reset streaming precision/recall before each batch.
                sess.run(metric_vars_initializer)
                bno = bno+1
                print('{} loading training data for batch {}...'.format(
                    strftime("%H:%M:%S"), bno))
                truuids, trdata, labels, trseqlen = loader.loadTrainingData(
                    bno, MAX_STEP)
                print('{} training...'.format(strftime("%H:%M:%S")))
                feeds = {data: trdata, target: labels,
                         seqlen: trseqlen, dropout: DROP_OUT, training: True}
                summary_str, worst = sess.run(
                    [summary, model.worst, model.optimize, model.precisions[1], model.recalls[1], model.f_score], feeds)[:2]
                bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
                print('{} bno {} max_entropy {:3.4f} predict {} actual {}'.format(
                    strftime("%H:%M:%S"), bno, max_entropy, predict, actual))
                data0.save_worst_rec(model_name, stime, "train", bno,
                                     truuids[bidx], max_entropy, predict, actual)
            # Only the last batch's summaries of the epoch are written.
            train_writer.add_summary(summary_str, bno)
            test_writer.add_summary(test_summary_str, bno)
            train_writer.flush()
            test_writer.flush()
            checkpoint_file = os.path.join(base_dir, 'model.ckpt')
            saver.save(sess, checkpoint_file, global_step=bno)
            sess.run(metric_vars_initializer)
        # test last epoch
        print('{} running on test set...'.format(strftime("%H:%M:%S")))
        feeds = {data: tdata, target: tlabels, seqlen: tseqlen,
                 dropout: 0, training: False}
        accuracy, worst, test_summary_str = sess.run(
            [model.accuracy, model.worst, summary, model.precisions[1], model.recalls[1], model.f_score], feeds)[:3]
        bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
        print('{} Epoch {} test accuracy {:3.3f}% max_entropy {:3.4f} predict {} actual {}'.format(
            strftime("%H:%M:%S"), EPOCH_SIZE, 100. * accuracy, max_entropy, predict, actual))
        data0.save_worst_rec(model_name, stime, "test", EPOCH_SIZE,
                             tuuids[bidx], max_entropy, predict, actual)
        train_writer.add_summary(summary_str, bno)
        test_writer.add_summary(test_summary_str, bno)
        train_writer.flush()
        test_writer.flush()
# Script entry point: kick off the full training run.
if __name__ == '__main__':
    run()
|
[
"carusyte@163.com"
] |
carusyte@163.com
|
4ecf47ca7e7b37620817c44064a35600aa63affa
|
dfc2c18053b8e7576f88e7b2524d7ca3a8f47282
|
/ch03/session3/63.py
|
a3458fac0a02818719ccecbeba2d2a88982ce7e0
|
[] |
no_license
|
Xoozi/tchomework
|
a6eed3bbf697ff12af8d42249ec58a139aed0c4c
|
627c98b0b652ef20fd93025a17341bba76fbfce6
|
refs/heads/master
| 2021-01-23T21:18:15.793703
| 2018-10-21T11:05:55
| 2018-10-21T11:05:55
| 57,583,655
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
#A family of cubic curves
#(a) For k = 0 and nearby positive and negative values of k, graph
#f(x) = x**3 + k*x on a common screen.
#How does the value of k affect the shape of the graph?
#When k is negative the graph is compressed and crosses zero in one region;
#as k moves toward 0 the graph rotates counter-clockwise and starts to bend,
#and near 0 it begins to have multiple roots.
#When k is positive it stretches out again.
#(b) Find f'(x). As you know, f'(x) is a quadratic function; find its
#discriminant. For which values of k is the discriminant positive, zero,
#or negative? For which k does f' have two zeros, one zero, or none?
#Explain how the value of k affects the shape of the graph of f.
#f'(x) = 3*x**2 + k
#discriminant = -4*3*k = -12k
#k > 0: discriminant < 0, f' has no zeros
#k < 0: discriminant > 0, f' has two zeros
#k = 0: discriminant = 0, f' has one zero
#So the value of k determines whether f has local extrema.
def f(x, k):
    """Evaluate the cubic f(x) = x**3 + k*x.

    Works for scalars and for array-like x (e.g. linspace output),
    since only ** and * are applied to the argument.
    """
    cubic_term = x ** 3
    linear_term = k * x
    return cubic_term + linear_term
def ddd(s, e, a):
    """Plot the family f(x) = x**3 + k*x for k from -1280 to 1280 (step 20).

    :param s: left end of the x interval
    :param e: right end of the x interval
    :param a: number of sample points

    NOTE(review): relies on `plot` and `linspace` being in scope (pylab
    interactive namespace) — run under `ipython --pylab` or import them.
    """
    red = 0
    k = -1280
    # Draw the x axis for reference.
    plot([s, e], [0, 0], '-k')
    x = linspace(s, e, a)
    while k <= 1280:
        y = f(x, k)
        # Bug fix: with 129 iterations the red channel reached 256 on the
        # last one, yielding the invalid color string '#100....'; clamp it.
        plot(x, y, '#%02X%02X%02X' % (min(red, 255), 0, 0))
        red += 2
        k += 20
# Plot the family over x in [-16, 16] with 1000 sample points.
ddd(-16, 16, 1000)
|
[
"wwfxtt@gmail.com"
] |
wwfxtt@gmail.com
|
d954433fc734887cf2bed62499ea0205cefd66a3
|
30b97efb2f36f81aa684d16d19e0e2db17f2967d
|
/기타/2468.py
|
05d75b8ff59e165f6298ad243ba4d49c20202b24
|
[] |
no_license
|
jmseb3/bakjoon
|
0a784a74c6476ef51864e2ada9d2551c7c7979eb
|
a38db54e851372059b0e45add92e43e556835e62
|
refs/heads/main
| 2023-08-25T08:43:04.579785
| 2021-10-01T08:40:37
| 2021-10-01T08:40:37
| 362,287,450
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
from collections import deque

# Read the N x N height grid and track the tallest cell.
N = int(input())
maps = []
highest = 0
for _ in range(N):
    row = list(map(int, input().split()))
    highest = max(highest, max(row))
    maps.append(row)

OFFSETS = ((-1, 0), (1, 0), (0, 1), (0, -1))
best = 0


def bfs(start_y, start_x, level, seen):
    """Flood-fill the safe region (height >= level) containing the start cell."""
    queue = deque([(start_y, start_x)])
    seen[start_y][start_x] = True
    while queue:
        cy, cx = queue.popleft()
        for dy, dx in OFFSETS:
            ny, nx = cy + dy, cx + dx
            if 0 <= ny < N and 0 <= nx < N \
                    and maps[ny][nx] >= level and not seen[ny][nx]:
                seen[ny][nx] = True
                queue.append((ny, nx))


# For every possible water level, count the connected safe regions and
# remember the maximum (level 0 covers the "no rain" case).
for level in range(highest + 1):
    regions = 0
    seen = [[False] * N for _ in range(N)]
    for y in range(N):
        for x in range(N):
            if maps[y][x] >= level and not seen[y][x]:
                bfs(y, x, level, seen)
                regions += 1
    best = max(best, regions)
print(best)
|
[
"jmseb3@naver.com"
] |
jmseb3@naver.com
|
84f29f68b65af4c479188bad5fe13eb540caa362
|
7fac5e7216c8f4328f21a14f9e222005890a57f8
|
/11_Actor_Critic_Advantage/refactor/CartPole/network.py
|
a91406a51ae6f89da620c63ac4298837c272d612
|
[] |
no_license
|
ZhangRui111/MorvanRL
|
bee77d644df50ce9900be6ec7d702c395238fae4
|
ad443d56314427aa9ebe4af552dde0f5470da967
|
refs/heads/master
| 2021-04-14T12:05:14.657272
| 2019-03-26T02:28:31
| 2019-03-26T02:28:31
| 126,663,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
import numpy as np
import tensorflow as tf
def build_actor_network(n_features, n_actions, lr):
    """Build the TF1 graph for the actor (policy) network.

    Args:
        n_features: int, dimensionality of the observation vector.
        n_actions: int, number of discrete actions.
        lr: float, Adam learning rate.

    Returns:
        [[s, a, td_error], [acts_prob, exp_v, train_op]] — the input
        placeholders and the ops to fetch via session.run.
    """
    # Batch size is fixed to 1: one state transition per update.
    s = tf.placeholder(tf.float32, [1, n_features], "state")
    a = tf.placeholder(tf.int32, None, "act")
    td_error = tf.placeholder(tf.float32, None, "td_error")  # TD_error
    with tf.variable_scope('Actor'):
        l1 = tf.contrib.layers.fully_connected(s, 20, activation_fn=tf.nn.relu)
        acts_prob = tf.contrib.layers.fully_connected(l1, n_actions, activation_fn=tf.nn.softmax)
    with tf.variable_scope('exp_v'):
        log_prob = tf.log(acts_prob[0, a])
        # log_prob = tf.exp(acts_prob[0, a])  # tf.exp can also convergent
        exp_v = tf.reduce_mean(log_prob * td_error)  # advantage (TD_error) guided loss
    with tf.variable_scope('train'):
        train_op = tf.train.AdamOptimizer(lr).minimize(-exp_v)  # minimize(-exp_v) = maximize(exp_v)
    return [[s, a, td_error], [acts_prob, exp_v, train_op]]
    # # debug mode # #
    # return [[s, a, td_error], [acts_prob, exp_v, train_op], [log_prob, l1]]
    # # debug mode # #
def build_critic_network(n_features, lr, discount):
    """Build the TF1 graph for the critic (state-value) network.

    Args:
        n_features: int, dimensionality of the observation vector.
        lr: float, Adam learning rate.
        discount: float, reward discount factor (gamma).

    Returns:
        [[s, v_, r], [v, td_error, loss, train_op]] — the input
        placeholders and the ops to fetch via session.run.
    """
    s = tf.placeholder(tf.float32, [1, n_features], "state")
    # v_ is V(s') evaluated by this same network on the next state.
    v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
    r = tf.placeholder(tf.float32, None, 'r')
    with tf.variable_scope('Critic'):
        l1 = tf.contrib.layers.fully_connected(s, 20, activation_fn=tf.nn.relu)
        v = tf.contrib.layers.fully_connected(l1, 1, activation_fn=None)
    with tf.variable_scope('squared_TD_error'):
        td_error = r + discount * v_ - v
        loss = tf.square(td_error)  # TD_error = (r+gamma*V_next) - V_eval
    with tf.variable_scope('train'):
        train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    return [[s, v_, r], [v, td_error, loss, train_op]]
|
[
"zhangruisg111@163.com"
] |
zhangruisg111@163.com
|
5db9f1bb82aaada88a79243dab0be796299f41e9
|
a1d8fefb84ce2f69ebce5fedcdf5262ba0005a5f
|
/zvt/recorders/eastmoney/meta/china_stock_category_recorder.py
|
a0a6b9c47e284491ad80a9fade92202ba428d1f4
|
[
"MIT"
] |
permissive
|
vinhphu3000/FinanceCenter
|
f2c827ffe268421011682ed45375f55ac6ddc54a
|
1511751fe6d7d1f1fb940ae66d29b45eb0782fea
|
refs/heads/master
| 2023-03-30T19:55:17.124679
| 2021-03-27T11:40:18
| 2021-03-27T11:40:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,630
|
py
|
# -*- coding: utf-8 -*-
import pandas as pd
from numba import njit
from zvt import zvt_config
from zvt.api.data_type import Region, Provider, EntityType
from zvt.api.quote import china_stock_code_to_id
from zvt.domain import BlockStock, BlockCategory, Block
from zvt.contract.api import df_to_db
from zvt.contract.recorder import RecorderForEntities, TimeSeriesDataRecorder
from zvt.networking.request import sync_get
from zvt.utils.time_utils import now_pd_timestamp, PD_TIME_FORMAT_DAY
from zvt.utils.utils import json_callback_param
class EastmoneyChinaBlockRecorder(RecorderForEntities):
provider = Provider.EastMoney
data_schema = Block
region = Region.CHN
# 用于抓取行业/概念/地域列表
category_map_url = {
BlockCategory.industry: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKHY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_F1A61014DE5E45B7A50068EA290BC918&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08766',
BlockCategory.concept: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKGN&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=300&lvl=&cb=jsonp_3071689CC1E6486A80027D69E8B33F26&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08251',
# BlockCategory.area: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKDY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_A597D4867B3D4659A203AADE5B3B3AD5&token=4f1862fc3b5e77c150a2b985b12db0fd&_=02443'
}
def init_entities(self):
self.entities = [BlockCategory.industry, BlockCategory.concept]
def process_loop(self, entity, http_session):
text = sync_get(http_session, self.category_map_url[entity], return_type='text')
if text is None:
return
results = json_callback_param(text)
@njit(nopython=True)
def numba_boost_up(results):
the_list = []
for result in results:
items = result.split(',')
code = items[1]
name = items[2]
entity_id = f'block_cn_{code}'
the_list.append({
'id': entity_id,
'entity_id': entity_id,
'entity_type': EntityType.Block.value,
'exchange': 'cn',
'code': code,
'name': name,
'category': entity.value
})
return the_list
the_list = numba_boost_up(results)
if the_list:
df = pd.DataFrame.from_records(the_list)
df_to_db(df=df, ref_df=None, region=Region.CHN, data_schema=self.data_schema, provider=self.provider)
self.logger.info(f"finish record sina blocks:{entity.value}")
class EastmoneyChinaBlockStockRecorder(TimeSeriesDataRecorder):
region = Region.CHN
provider = Provider.EastMoney
entity_schema = Block
data_schema = BlockStock
# 用于抓取行业包含的股票
category_stocks_url = 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C.{}{}&sty=SFCOO&st=(Close)&sr=-1&p=1&ps=300&cb=jsonp_B66B5BAA1C1B47B5BB9778045845B947&token=7bc05d0d4c3c22ef9fca8c2a912d779c'
def __init__(self, exchanges=None, entity_ids=None, codes=None, batch_size=10, force_update=False, sleeping_time=5,
default_size=zvt_config['batch_size'], real_time=False, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
super().__init__(EntityType.Block, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
def generate_domain_id(self, entity, df, time_fmt=PD_TIME_FORMAT_DAY):
return entity.id + '_' + df['stock_id']
def record(self, entity, start, end, size, timestamps, http_session):
url = self.category_stocks_url.format(entity.code, '1')
text = sync_get(http_session, url, return_type='text')
if text is None:
return None
results = json_callback_param(text)
# @njit(nopython=True)
def numba_boost_up(results):
the_list = []
for result in results:
items = result.split(',')
stock_code = items[1]
stock_id = china_stock_code_to_id(stock_code)
the_list.append({
'stock_id': stock_id,
'stock_code': stock_code,
'stock_name': items[2],
})
return the_list
the_list = numba_boost_up(results)
if the_list:
df = pd.DataFrame.from_records(the_list)
return df
self.sleep()
return None
def format(self, entity, df):
df['timestamp'] = now_pd_timestamp(Region.CHN)
df['entity_id'] = entity.id
df['provider'] = self.provider.value
df['code'] = entity.code
df['name'] = entity.name
df['level'] = self.level.value
df['exchange'] = entity.exchange
df['entity_type'] = EntityType.Block.value
df['id'] = self.generate_domain_id(entity, df)
return df
__all__ = ['EastmoneyChinaBlockRecorder', 'EastmoneyChinaBlockStockRecorder']
if __name__ == '__main__':
# init_log('china_stock_category.log')
recorder = EastmoneyChinaBlockStockRecorder(codes=['BK0727'])
recorder.run()
|
[
"doncat99@gmail.com"
] |
doncat99@gmail.com
|
a25196a8f29cc48a0abcab0af5d74810790319c3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/470/usersdata/281/112524/submittedfiles/Av2_Parte3.py
|
a4b0c34470239c11ef1a33686d04422e6413ad37
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
# -*- coding: utf-8 -*-
m=int(input('Digite a quantidade de listas desejada: '))
for i in range(0,m,1):
lista=[]
n=int(input('Digite a quantidade de elementos da %d lista: ' %(i+1)))
for i in range(0,n,1):
lista.append(int(input('Digite o %d elemento dessa lista: ' %(i+1))))
media=sum(lista)/len(lista)
for i in range(0,n,1):
soma=0
soma(i-media)**2
dp=((1/(n-1))*soma)**(1/2)
print(media)
print(dp)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
aca102ba379f86d774530313c359be0ea25547c8
|
747f759311d404af31c0f80029e88098193f6269
|
/extra-addons/hr_attendance_analysis/interface.py
|
01271421ed420fa708a35f11eb536752ed1a9217
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958
| 2013-09-15T13:03:36
| 2013-09-15T13:03:36
| 9,648,444
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,712
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Clock Reader for OpenERP
# Copyright (C) 2004-2009 Moldeo Interactive CT
# (<http://www.moldeointeractive.com.ar>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import timeutils as tu
class Interface(object):
def __init__(self, cr, uid, pool, oid, otype):
self._parms = (cr, uid, pool)
self._cache = pool.get(otype).read(cr, uid, oid)
self._field = pool.get(otype).fields_get(cr, uid)
self._local_cache = {}
def __getitem__(self, name):
if name in self._local_cache:
return self._local_cache[name]
if name in self._cache:
ret = self._cache[name]
if isinstance(ret, bool): return ret
field = self._field[name]
if field['type'] in ['char','int','float', 'selection']:
_r = ret
elif field['type'] in ['datetime']:
_r = tu.dt(ret)
elif field['type'] in ['date']:
_r = tu.d(ret)
elif field['type'] in ['many2one']:
_r = Interface(*(self._parms + (ret[0] ,field['relation'])))
elif field['type'] in ['many2many', 'one2many']:
_r = map(lambda a: Interface(*(self._parms + a))
, zip(ret, [field['relation']]*len(ret)))
else:
raise NotImplementedError, \
"Not implemented for %s of type %s (%s)." % (name,
field['type'],
str(ret))
self._local_cache[name] = _r
return _r
else:
# raise ValueError, "Not exists %s in object." % name
return False
def __getattr__(self, name):
return self[name]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"geerish@omerp.net"
] |
geerish@omerp.net
|
1648baed214078a8926589e49711518dd6f5a517
|
49f61714a6f78d984fd2194d6064d84e891bc5b7
|
/2019-1/220/users/4258/codes/1647_2445.py
|
e74ad3e0112c554a5f274784d793ec04694fb134
|
[] |
no_license
|
psbarros/Variaveis3
|
b5c4e1517e7d94a846ee03791d25d5821a1c651c
|
3dcf6f810709ce03c78335acf9533e008a2ae125
|
refs/heads/master
| 2023-06-13T07:05:00.878430
| 2021-07-06T17:51:37
| 2021-07-06T17:51:37
| 383,549,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
escala = input("Escolha C para Celsius, ou F para Fahrenheit: ")
temp = float(input("Temperatura: "))
c = (5/9)*(temp - 32)
f = ((9/5)*temp) + 32
if(escala == "C"):
print(f)
if(escala == "F"):
print(c)
|
[
"psb@icomp.ufam.edu.br"
] |
psb@icomp.ufam.edu.br
|
21b8c9f44927459be125440bea1eff530f530da0
|
040236bf3bb45826c0bbc39e7432512ff420a0d1
|
/geomk/api/serializers.py
|
6c022a859e6e149bbc1d0f638e27c128eb57e92b
|
[] |
no_license
|
ThiagoDiasV/parking-lot-api
|
2768baf8921b9dc087616def8c93ccc4f2fe8cf5
|
5cb3f687099bea59740b0034aeebf9a65b791358
|
refs/heads/master
| 2022-12-13T02:25:50.754524
| 2020-02-12T12:08:32
| 2020-02-12T12:08:32
| 232,959,041
| 4
| 3
| null | 2022-03-08T21:10:08
| 2020-01-10T03:36:52
|
Python
|
UTF-8
|
Python
| false
| false
| 991
|
py
|
from .models import Car
from rest_framework import serializers
class CarSerializer(serializers.ModelSerializer):
class Meta:
model = Car
fields = "__all__"
read_only_fields = ["entry_time", "left_time", "time", "paid", "left"]
def create(self, validated_data: dict) -> Car:
"""
Overriding create function to avoid POST with cars that already
are at parking lot and don't left yet.
Cars with plate registered can only enter if they already left the last
time.
"""
try:
cars = Car.objects.filter(plate=validated_data.get("plate"))
last_register = cars.last()
if last_register:
if not last_register.left:
raise serializers.ValidationError(
"Car already at parking lot and don't left yet."
)
except IndexError:
pass
return Car.objects.create(**validated_data)
|
[
"thiago76ers@gmail.com"
] |
thiago76ers@gmail.com
|
27da08cfa78217f9a5c0fc73b6cccf72ff2e25ac
|
69a2f0c4419d0bf39d2fe46e8ff2ee117eaf237a
|
/mutilprocess/test.py
|
5b992fe9cd521106dc327da20aafd0555f827fc5
|
[] |
no_license
|
lxy5513/python
|
7da339e8ef6e2fa827e2da723c0f4e3595e11e04
|
228c3e631e642228de659e68f98ea938bcb2509f
|
refs/heads/master
| 2020-03-27T03:21:03.582842
| 2020-01-17T00:39:57
| 2020-01-17T00:39:57
| 145,855,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
import multiprocessing
import time
import collections
Queue = collections.deque(maxlen=10)
def consume(interval):
while True:
print("Queue: ", Queue)
if len(Queue) == 0:
print("no data")
time.sleep(0.5)
else:
num = Queue.pop()
print("Num: ", num)
time.sleep(0.5)
print("worker_1")
time.sleep(interval)
print("end worker_1")
def productor(interval):
while True:
print("productor")
time.sleep(interval)
Queue.append(1)
print("length of queue is: ", len(Queue))
print("end worker_2")
if __name__ == "__main__":
p1 = multiprocessing.Process(target = consume, args = (2,))
p2 = multiprocessing.Process(target = productor, args = (3,))
p1.start()
p2.start()
|
[
"lxy5513@gmail.com"
] |
lxy5513@gmail.com
|
883182e36ae3c57c73a7b281ee795b79299603a9
|
191fbcc96b9f0c74b88b001003f024064c973753
|
/gateware/rtl/platform/syzygy/boson.py
|
8ca2d666a8022c603a68c64631c4c2278825ce82
|
[
"BSD-2-Clause"
] |
permissive
|
gregdavill/boson-eth-firmware
|
f0b5895469260e414b90cd7e7e0fad37a5728159
|
056843c43fac6486114bfb916fb78a4f7d38e87c
|
refs/heads/main
| 2023-08-24T12:55:10.648741
| 2021-10-15T00:28:16
| 2021-10-15T00:28:16
| 374,504,758
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,166
|
py
|
# This file is Copyright (c) 2020 Gregory Davill <greg.davill@gmail.com>
# License: BSD
from litex.build.generic_platform import *
def boson_syzygy_r0d1(syzygy_id=0):
_id = f'SYZYGY{syzygy_id}'
return [
("Boson", 0,
Subsignal("data", Pins(f'{_id}:S27 {_id}:P2C_CLKN {_id}:D5P {_id}:S26 \
{_id}:D7N {_id}:D2P {_id}:D2N {_id}:S17 \
{_id}:D1N {_id}:S16 {_id}:D5N {_id}:S18 \
{_id}:C2P_CLKN {_id}:S25 {_id}:D1P {_id}:D6P \
{_id}:D4P {_id}:D0P {_id}:D6N {_id}:S23 \
{_id}:'),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("clk", Pins("A17"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("vsync", Pins("A13"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("hsync", Pins("D16"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("valid", Pins("C16"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("tx", Pins("A3"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("rx", Pins("B9"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("reset", Pins("B2"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("ext_sync", Pins("B18"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
Subsignal("rst_n", Pins("SYZYGY1:D5N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("clk_p", Pins("SYZYGY1:D4P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("clk_n", Pins("SYZYGY1:D4N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("cs_n", Pins("SYZYGY1:D6P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW TERMINATION=OFF")),
Subsignal("dq", Pins("SYZYGY1:D2N SYZYGY1:D0N SYZYGY1:D5P SYZYGY1:D2P SYZYGY1:D3P SYZYGY1:D1N SYZYGY1:D1P SYZYGY1:D0P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
Subsignal("rwds", Pins("SYZYGY1:D3N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
),
]
|
[
"greg.davill@gmail.com"
] |
greg.davill@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.