blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
061aebdb29e7c6907a3a9ac112f56e81483271f8
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_156/832.py
|
6c50802fc3b768a580b91d24aaf771150f9157d8
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,650
|
py
|
#!/usr/bin/env python
import sys
from math import ceil, floor
def solve_naive_test(pancakes_per_plate, minute=0):
    """Exhaustive reference solver (Python 2: uses xrange).

    Each minute either every diner eats one pancake from their plate, or one
    plate holding more than 2 pancakes is split into floor/ceil halves.
    Returns the minimum number of minutes until every plate is empty.
    """
    if not any(pancakes_per_plate):
        return minute
    else:
        splits = xrange(len(pancakes_per_plate))
        # option 1: a minute of eating -- every non-empty plate loses one
        eaten_one = [(x - 1) if x > 0 else 0 for x in pancakes_per_plate]
        solutions = [solve_naive_test(eaten_one, minute + 1)]
        # option 2: spend the minute splitting one plate into two halves
        for split in splits:
            if pancakes_per_plate[split] > 2:
                split_one = pancakes_per_plate[:split]
                split_one.append(int(floor(pancakes_per_plate[split] / 2.0)))
                split_one.append(int(ceil(pancakes_per_plate[split] / 2.0)))
                split_one.extend(pancakes_per_plate[split + 1:])
                solutions.append(solve_naive_test(split_one, minute + 1))
        return min(solutions)
def one_eaten(plates):
    """Return a copy of *plates* after every non-empty plate loses one pancake."""
    return [max(count - 1, 0) for count in plates]
def mean(l):
    """Arithmetic mean of *l* as a float (true division on Py2 and Py3)."""
    total = sum(l)
    return total / float(len(l))
def max_split(slope, max_slope, max_count):
    """Yield candidate plate lists (Python 2: uses xrange).

    For each give-away amount p in 1..max_slope//2, every plate currently at
    the maximum gives p pancakes to a fresh plate.  Assumes *slope* is sorted
    descending so the max_count maximal plates sit at the front.
    """
    for p in xrange(1, int(max_slope / 2.0) + 1):
        s = list(slope)
        for x in xrange(max_count):
            s[x] -= p
        # append one new plate of size p per maximal plate that was reduced
        s.extend([p] * max_count)
        #print '\t', s
        yield s
def solve(pancakes_per_plate, minute=0, log=None):
    """Return (minutes, log): the minimum minutes to empty all plates.

    Each step is either one minute of everyone eating (via one_eaten), or
    max_count minutes spent splitting off part of every maximal plate
    (via max_split).

    :param pancakes_per_plate: list of pancake counts per plate
    :param minute: minutes already spent (recursion accumulator)
    :param log: moves taken so far (recursion accumulator)
    """
    # FIX: the default was the mutable `log=[]`; replaced with a None
    # sentinel.  The original never mutated the default in place (it always
    # built `log + [...]`), so observable behavior is unchanged.
    if log is None:
        log = []
    if not any(pancakes_per_plate):
        return (minute, log)
    else:
        slope = list(sorted(pancakes_per_plate, reverse=True))
        max_slope = max(slope)
        max_count = sum(1 for p in slope if p == max_slope)
        #print slope, max_slope, max_count
        # always consider a minute of eating
        solutions = [solve(one_eaten(slope), minute + 1, log + ['eat'])]
        # consider splitting every maximal plate (only worthwhile above 2)
        if max_slope > 2:
            for split in max_split(slope, max_slope, max_count):
                solutions.append(
                    solve(
                        split,
                        minute + max_count,
                        log + ['split {x}'.format(x=max_count)],
                    ))
        # best solution = fewest minutes (first tuple element)
        return min(solutions, key=lambda k: k[0])
if __name__ == '__main__':
    # Code Jam style driver (Python 2: xrange / print statement).
    # Input: first line = number of cases; each case is a count line followed
    # by a line of pancake counts per plate.
    input_file_name = sys.argv[1]
    input_file = open(input_file_name)
    number_of_cases = int(input_file.readline().strip())
    first_case_number = 1
    for x in xrange(number_of_cases):
        # progress marker on stderr so stdout stays a clean answer file
        sys.stderr.write("{x}\n".format(x=x))
        nonempty_plate_count = int(input_file.readline().strip())
        pancakes_per_plate = [int(d) for d in input_file.readline().strip().split()]
        #print "Input", pancakes_per_plate
        solution, log = solve(pancakes_per_plate)
        print "Case #{case_number}: {minutes_needed}".format(
            case_number=(first_case_number + x),
            minutes_needed=solution
        )
        #print log
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
db6b50f1fb9d0dd10bd09ef05b6d826b4e4dc87e
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/elbv2_read/ssl-policy_list_custom.py
|
03504aaa68bf723809ae33af5c520e79b7bfbca0
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,992
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import read_no_parameter_custom
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/elbv2/describe-ssl-policies.html
if __name__ == '__main__':
    """
    """
    # Expected CLI: python <script> <config-profile> <template>, where
    # <template> selects the output shape: 'base' (table) or 'uid' (text).
    parameter_num = len(sys.argv)
    if parameter_num != 3:
        print("config value is not exist")
        print("Usage: python {} <config> <template>".format(sys.argv[0]))
        print("Support template : base, uid")
        sys.exit(1)
    # NOTE(review): profile_name is read but never used below -- confirm
    # whether read_no_parameter_custom picks it up some other way.
    profile_name = sys.argv[1]
    template_name = sys.argv[2]
    if template_name == "base":
        output_name = "table"
        # JMESPath query: policy name plus comma-joined protocol/cipher lists
        query_name = """\"
SslPolicies[*].[
Name
,SslProtocols[] | join(',',@)
,Ciphers[].Name | join(',',@)
]
\""""
    elif template_name == "uid":
        output_name = "text"
        query_name = """\"
\""""
    else:
        print("Usage : {} template name is not exist".format(template_name))
        print("Support template : base, uid")
        sys.exit(1)
    # collapse the multi-line query to the single-line form the aws cli
    # expects (this also strips every space inside the quoted query)
    change_query_name = query_name.replace("\n", "")
    change_query_name = change_query_name.replace(" ", "")
    add_option_dict = {}
    #######################################################################
    # setting option use
    # ex: add_option_dict["setting_matching_parameter"] = "--owners"
    # ex: add_option_dict["setting_key"] = "owner_id"
    #######################################################################
    # single parameter
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    #######################################################################
    # custom parameter
    add_option_dict["output"] = output_name
    add_option_dict["query"] = change_query_name
    read_no_parameter_custom("elbv2", "describe-ssl-policies", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
c7a70ffaa62499a456c09c3f1f3ef7cd0b05a611
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/pyquil/startPyquil387.py
|
06eb003d185853b93d56f5429d08368942bce8d7
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
# qubit number=2
# total number=20
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the fixed 2-qubit pyquil Program.

    The gate sequence (and its order) *is* the program; the numbered
    comments are generator-assigned gate ids and are kept as-is.
    """
    prog = Program() # circuit begin
    prog += H(0) # number=1
    prog += H(1) # number=8
    prog += CZ(0,1) # number=9
    prog += H(1) # number=10
    prog += H(1) # number=11
    prog += CZ(0,1) # number=12
    prog += H(1) # number=13
    prog += H(1) # number=17
    prog += CZ(0,1) # number=18
    prog += H(1) # number=19
    prog += CNOT(1,0) # number=14
    prog += X(0) # number=15
    prog += CNOT(1,0) # number=16
    prog += Y(1) # number=6
    prog += X(0) # number=4
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count occurrences of each measured bitstring.

    (Function name kept as-is -- sic -- so existing callers keep working.)

    :param bitstrings: iterable of hashable measurement strings
    :return: dict mapping bitstring -> occurrence count
    """
    d = {}
    for l in bitstrings:
        # dict.get with a default replaces the original None-check branch
        d[l] = d.get(l, 0) + 1
    return d
if __name__ == '__main__':
    # Build the circuit, run it 1024 times on a 1-qubit QVM, and dump the
    # bitstring histogram to a csv file.
    prog = make_circuit()
    qvm = get_qc('1q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # stack per-qubit result columns into per-shot rows, then stringify
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # FIX: use a with-block so the file is closed even if the write raises
    # (the original leaked the handle on any exception before close()).
    with open("../data/startPyquil387.csv","w") as writefile:
        print(summrise_results(bitstrings),file=writefile)
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
1c84b99c1dbcc510063d63711b2450fd162f9f19
|
4f408d65db60911f56110c351cb3b64835e0c5fb
|
/caffe2/python/predictor/predictor_exporter_test.py
|
61e3fb0a5ad87ad8bfa994bb3bffe2727df0a74f
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
KeyKy/caffe2_SSD
|
a02c065aef2dbcfd00faae8be0440d7a4ff0fb76
|
7235688ea5e212dbe8609d780dd94c8c7d9fef54
|
refs/heads/master
| 2021-09-18T14:36:11.247427
| 2018-07-10T09:59:35
| 2018-07-10T09:59:35
| 89,928,918
| 8
| 5
| null | 2018-07-27T02:14:38
| 2017-05-01T14:04:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import unittest
import numpy as np
from caffe2.python import cnn, workspace, core
from caffe2.python.predictor_constants import predictor_constants as pc
import caffe2.python.predictor.predictor_exporter as pe
import caffe2.python.predictor.predictor_py_utils as pred_utils
class PredictorExporterTest(unittest.TestCase):
    """Tests for predictor_exporter save/load round-trips through a minidb."""
    def _create_model(self):
        # single FC layer mapping 5-dim "data" to 10-dim "y"
        m = cnn.CNNModelHelper()
        m.FC("data", "y",
            dim_in=5, dim_out=10,
            weight_init=m.XavierInit,
            bias_init=m.XavierInit)
        return m
    def setUp(self):
        # fixed seed so parameter initialization is reproducible
        np.random.seed(1)
        m = self._create_model()
        self.predictor_export_meta = pe.PredictorExportMeta(
            predict_net=m.net.Proto(),
            parameters=[str(b) for b in m.params],
            inputs=["data"],
            outputs=["y"],
            shapes={"y": (1, 10), "data": (1, 5)},
        )
        workspace.RunNetOnce(m.param_init_net)
        # snapshot the initialized parameter blobs before wiping the workspace
        self.params = {
            param: workspace.FetchBlob(param)
            for param in self.predictor_export_meta.parameters}
        # Reset the workspace, to ensure net creation proceeds as expected.
        workspace.ResetWorkspace()
    def test_meta_constructor(self):
        '''
        Test that passing net itself instead of proto works
        '''
        m = self._create_model()
        pe.PredictorExportMeta(
            predict_net=m.net,
            parameters=m.params,
            inputs=["data"],
            outputs=["y"],
            shapes={"y": (1, 10), "data": (1, 5)},
        )
    def test_meta_net_def_net_runs(self):
        """Save to minidb, reload, then verify init / global-init / predict nets."""
        for param, value in self.params.items():
            workspace.FeedBlob(param, value)
        extra_init_net = core.Net('extra_init')
        extra_init_net.ConstantFill('data', 'data', value=1.0)
        pem = pe.PredictorExportMeta(
            predict_net=self.predictor_export_meta.predict_net,
            parameters=self.predictor_export_meta.parameters,
            inputs=self.predictor_export_meta.inputs,
            outputs=self.predictor_export_meta.outputs,
            shapes=self.predictor_export_meta.shapes,
            extra_init_net=extra_init_net,
        )
        db_type = 'minidb'
        db_file = tempfile.NamedTemporaryFile(
            delete=False, suffix=".{}".format(db_type))
        pe.save_to_db(
            db_type=db_type,
            db_destination=db_file.name,
            predictor_export_meta=pem)
        workspace.ResetWorkspace()
        meta_net_def = pe.load_from_db(
            db_type=db_type,
            filename=db_file.name,
        )
        # freshly reset workspace must not contain the blobs yet
        self.assertTrue("data" not in workspace.Blobs())
        self.assertTrue("y" not in workspace.Blobs())
        init_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_INIT_NET_TYPE)
        # 0-fills externalblobs blobs and runs extra_init_net
        workspace.RunNetOnce(init_net)
        self.assertTrue("data" in workspace.Blobs())
        self.assertTrue("y" in workspace.Blobs())
        print(workspace.FetchBlob("data"))
        # extra_init_net set "data" to ones; "y" is still zero-filled
        np.testing.assert_array_equal(
            workspace.FetchBlob("data"), np.ones(shape=(1, 5)))
        np.testing.assert_array_equal(
            workspace.FetchBlob("y"), np.zeros(shape=(1, 10)))
        # Load parameters from DB
        global_init_net = pred_utils.GetNet(meta_net_def,
            pc.GLOBAL_INIT_NET_TYPE)
        workspace.RunNetOnce(global_init_net)
        # Run the net with a reshaped input and verify we are
        # producing good numbers (with our custom implementation)
        workspace.FeedBlob("data", np.random.randn(2, 5).astype(np.float32))
        predict_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_NET_TYPE)
        workspace.RunNetOnce(predict_net)
        # y must equal data . W^T + b from the snapshotted parameters
        np.testing.assert_array_almost_equal(
            workspace.FetchBlob("y"),
            workspace.FetchBlob("data").dot(self.params["y_w"].T) +
            self.params["y_b"])
    def test_db_fails_without_params(self):
        """Saving without parameters fed into the workspace must raise."""
        with self.assertRaises(Exception):
            for db_type in ["minidb"]:
                db_file = tempfile.NamedTemporaryFile(
                    delete=False, suffix=".{}".format(db_type))
                pe.save_to_db(
                    db_type=db_type,
                    db_destination=db_file.name,
                    predictor_export_meta=self.predictor_export_meta)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
e79c9f4bec7946370a9fb9d8c7ec083d5df103b7
|
f02485de5a101f3b69a45b2c4e71bd950ee55eba
|
/Z_other/del/ncPreprocessNew.py
|
f15f0e79cda38afbbe7ca272efe385042833c0f5
|
[] |
no_license
|
newjokker/PyUtil
|
ef4266b0ca32157f9de6e2cac1b1a10647190d99
|
32e64be10a6cd2856850f6720d70b4c6e7033f4e
|
refs/heads/master
| 2020-11-28T00:19:02.073391
| 2019-12-23T02:07:40
| 2019-12-23T02:07:40
| 229,654,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,221
|
py
|
# -*-coding:utf-8 -*-
import os
import sys
import datetime
import xml.dom.minidom
from xml.dom.minidom import Document
import datetime
# from Decorator.time_it import time_this
class NcFileToTiff(object):
    """Convert queued .nc (NetCDF) files to GeoTIFF by shelling out to gdal_translate."""
    def __init__(self):
        # files queued for conversion
        self.files_to_transform = []
        # root directory for the converted output
        self.save_dir = None
        # path to the gdal_translate executable used for the conversion
        self.gdal_translate_exe_path = None
    @staticmethod
    def get_file_time(file_path):
        """Extract the 10-digit timestamp from the file name, converting UTC to Beijing time."""
        file_base_name = os.path.basename(file_path) # bare file name
        if 'ASI' in file_base_name:
            UTC_str = file_base_name[-13:-3]
            UTC_time = datetime.datetime.strptime(UTC_str, '%Y%m%d%H')
            # NOTE(review): Beijing time is UTC+8, yet this *subtracts* 8
            # hours (hours=-8) -- confirm the intended direction of the
            # conversion against real input file names.
            CHN_time = UTC_time + datetime.timedelta(hours=-8)
            CHN_str = datetime.datetime.strftime(CHN_time, '%Y%m%d%H')
            return CHN_str
        elif 'CHN' in file_base_name:
            # already Beijing time: return the raw 10-digit timestamp
            return file_base_name[-13:-3]
        else:
            # unknown naming scheme: no timestamp available
            return
    def get_save_path(self, file_path):
        """Derive the output GeoTIFF path from the input file name."""
        CHN_time_str = self.get_file_time(file_path) # Beijing-time stamp
        # basename up to (but excluding) the product timestamp
        file_basename = os.path.basename(file_path)
        # output subfolder: YYYYMM/YYYYMMDD, then NRT|RT, then HOR|DAY
        out_reladir = os.path.join(CHN_time_str[:6], CHN_time_str[:8])
        out_reladir += r'\NRT' if '_NRT_' in file_basename else r'\RT'
        out_reladir += r'\HOR' if '_HOR-' in file_basename else r'\DAY'
        out_path = os.path.join(self.save_dir, out_reladir)
        # create the output folder on first use
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        #
        geotif_name = file_basename[:-13] + CHN_time_str + '.tif' # rewrite the basename's timestamp as Beijing time
        out_geotif = os.path.join(out_path, geotif_name)
        return out_geotif
    def nc_to_tiff(self, nc_file, out_geotif):
        """Run gdal_translate via the shell to convert one nc file to GeoTIFF."""
        cmd_str = self.gdal_translate_exe_path + ' -a_srs WGS84 -of GTiff -sds ' + nc_file + ' ' + out_geotif
        os.system(cmd_str)
    # @time_this
    def do_process(self):
        """Main loop: convert every queued .nc file to its derived output path."""
        for each_file in self.files_to_transform:
            if not each_file.endswith('.nc'): # skip non-nc files
                continue
            # derive the destination path
            save_path = self.get_save_path(each_file)
            # convert
            self.nc_to_tiff(each_file, save_path)
class Xml_Util(object):
    """Static helpers for writing and reading XML via xml.dom.minidom."""
    # ------------------------------ writing xml ---------------------------------------------------------------------
    @staticmethod
    def get_document(head_time_str):
        """Return a Document (with a leading comment node) for writing XML."""
        document = Document()
        document.appendChild(document.createComment(head_time_str))
        return document
    @staticmethod
    def add_sub_node(document, curNode, nodeKey, nodeValue, nodeAtt=None):
        """Append a child element (with optional attributes and text) under curNode."""
        if nodeAtt is None:
            nodeAtt = {}
        try:
            child = document.createElement(nodeKey)
            # write the attributes
            for attKey in nodeAtt:
                child.setAttribute(attKey, nodeAtt[attKey])
            # write the text value
            if nodeValue:
                child_text = document.createTextNode(nodeValue)
                child.appendChild(child_text)
            # attach to the parent node
            curNode.appendChild(child)
            return child
        except:
            # NOTE(review): bare except swallows every error and returns
            # None -- confirm callers expect a best-effort append.
            print("* error in add node")
            return None
    @staticmethod
    def save_xml(document, xml_path):
        """Pretty-print the document to a local file."""
        with open(xml_path, 'w') as f:
            f.write(document.toprettyxml(indent='\t', encoding='utf-8'))
    # ------------------------------ reading xml ---------------------------------------------------------------------
    @staticmethod
    def get_root_node(xml_path):
        """Parse the file and return its document element for reading."""
        DOMTree = xml.dom.minidom.parse(xml_path)
        root_node = DOMTree.documentElement
        return root_node
    @staticmethod
    def get_info_from_node(eachNode, assign_attr=None):
        """Extract {'attr': {...}, 'value': ...} from an Element node.

        Only Element nodes (nodeType == 1) are supported; anything else
        returns None.
        """
        # -----------------------------------------------------------------
        if eachNode.nodeType != 1:
            return
        # -----------------------------------------------------------------
        element_info = {}
        # -----------------------------------------------------------------
        # collect the attributes (all of them, or only the requested ones)
        attr_dict = {}
        if assign_attr:
            assign_attr = set(assign_attr)
            for each_attr in assign_attr:
                attr_dict[each_attr] = eachNode.getAttribute(each_attr)
        else:
            for each_attr in eachNode.attributes.values():
                attr_dict[each_attr.name] = each_attr.value
        element_info['attr'] = attr_dict
        # -----------------------------------------------------------------
        # text value; note: a node with child elements has no text value here
        node_value = None
        if len(eachNode.childNodes) == 1:
            if eachNode.childNodes[0].nodeType == 3:  # 3 == TEXT_NODE
                node_value = eachNode.childNodes[0].nodeValue
        element_info['value'] = node_value
        # -----------------------------------------------------------------
        # child nodes (currently unused)
        # child_nodes = eachNode.childNodes
        # -----------------------------------------------------------------
        return element_info
    # ------------------------------ common --------------------------------------------------------------------------
    @staticmethod
    def xml_parser(xml_path, need_tages, attr="identify"):
        """
        Read one level of an xml file into a dict.
        :param xml_path: path of the xml file
        :param need_tages: tag names to collect
        :param attr: attribute name used as the dict key
        :return: {'attr':value}
        """
        def get_key_value(oneNode, attr_temp):
            """Read one tag: (attribute value, text content)."""
            key = oneNode.getAttribute(attr_temp)
            value = oneNode.childNodes[0].data
            return key, value
        xml_info = {}
        DOMTree = xml.dom.minidom.parse(xml_path)
        collection = DOMTree.documentElement
        # walk each requested tag name
        for each_tag in need_tages:
            for eachNode in collection.getElementsByTagName(each_tag):
                (info_key, info_value) = get_key_value(eachNode, attr)
                xml_info[info_key] = info_value
        return xml_info
if __name__ == '__main__':
    a = NcFileToTiff()
    # ------------------ read the issue xml and resolve input/output paths ------------------
    # xml_path = r'Y:\inputXML\nc\inputXml\201908011259\201908011259.xml'
    xml_path = r'D:\BaiduNetdiskDownload\Algo\201908011239.xml'
    # xml_path = sys.argv[1]
    xml_dict = Xml_Util.xml_parser(xml_path, ['input'])
    save_path_xml = xml_dict['outXMLPath']
    a.save_dir = xml_dict['outFolder']
    a.gdal_translate_exe_path = xml_dict['gdal_translate']
    # a.gdal_translate_exe_path = r'C:\ProgramData\Anaconda2\envs\QingHai\Library\bin\gdal_translate.exe'
    a.files_to_transform = xml_dict['inputFile'].split(',')
    # BUG FIX: do_process() used to be called once *outside* the try block and
    # then again inside it -- converting every file twice and, if the first
    # call raised, crashing the script before any status xml was written.
    try:
        a.do_process()
        status = 'success'
    except:
        # broad catch is deliberate at this top-level boundary: any failure
        # must still produce a status xml for the caller
        status = 'error'
    # ---------------------------------------------------------------------------------
    # write the output status xml file
    if status == 'error':
        status, info = '0', 'error'
    else:
        status, info = '1', 'success'
    # initialize the output document
    document = Xml_Util.get_document('ProcessTime:' + str(datetime.datetime.now()))
    # add the root node
    XML = Xml_Util.add_sub_node(document, document, 'XML', '', None)
    # log / status info
    log = Xml_Util.add_sub_node(document, XML, 'log', '', None)
    Xml_Util.add_sub_node(document, log, 'status', status, None)
    Xml_Util.add_sub_node(document, log, 'info', info, None)
    Xml_Util.save_xml(document, save_path_xml)
    print('ok')
|
[
"18761609908@163.com"
] |
18761609908@163.com
|
a3b82adff7f91cff3f104899144e29691bc4658b
|
cd40b7cc395f36740000ed4a4144b1c0666ab0fd
|
/tests/test_hstrat/test_auxiliary_lib/test_parse_from_numeral_system.py
|
16155f656f410b5cdd20c314308e9e350596ab27
|
[
"MIT"
] |
permissive
|
mmore500/hstrat
|
94fd22c86a87a5707590b9398ef679444ed82d6d
|
b2d2caded1db5e2dc681d9f171d7c74b322c55c3
|
refs/heads/master
| 2023-08-31T03:36:44.457576
| 2023-08-25T14:39:29
| 2023-08-25T14:39:29
| 464,531,144
| 5
| 2
|
NOASSERTION
| 2023-08-25T13:07:52
| 2022-02-28T15:11:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,824
|
py
|
import random
import string
import pytest
from hstrat._auxiliary_lib import (
parse_from_numeral_system,
render_to_numeral_system,
)
@pytest.fixture
def alphabet():
    # base-36 digit set: '0'-'9' then 'a'-'z'
    return string.digits + string.ascii_lowercase
def test_parse_from_numeral_system(alphabet):
    """Spot-check base-36 parsing against known values."""
    cases = {
        "0": 0,
        "1": 1,
        "a": 10,
        "z": 35,
        "10": 36,
        "2s": 100,
        "2n9c": 123456,
    }
    for text, expected in cases.items():
        assert parse_from_numeral_system(text, alphabet) == expected
def test_render_and_parse_numeral_system_consistency(alphabet):
    """Render/parse must round-trip in both directions over random samples."""
    # value -> digits -> value
    for __ in range(100):
        value = random.randint(0, 1000000)
        rendered = render_to_numeral_system(value, alphabet)
        assert parse_from_numeral_system(rendered, alphabet) == value
    # digits -> value -> digits (modulo leading zeros)
    for __ in range(100):
        digit_string = "".join([random.choice(alphabet) for j in range(10)])
        parsed = parse_from_numeral_system(digit_string, alphabet)
        assert render_to_numeral_system(
            parsed, alphabet
        ) == digit_string.lstrip("0")
def test_hex_parsing():
    """Base-16 parsing agrees with int(x, 16)."""
    hex_digits = "0123456789abcdef"
    for text in ("0", "1", "0a3", "a3", "ff"):
        assert parse_from_numeral_system(text, hex_digits) == int(text, 16)
def test_octal_parsing():
    """Base-8 parsing agrees with int(x, 8)."""
    octal_digits = "01234567"
    for text in ("0", "1", "052", "52", "777"):
        assert parse_from_numeral_system(text, octal_digits) == int(text, 8)
def test_binary_parsing():
    """Base-2 parsing agrees with int(x, 2)."""
    binary_digits = "01"
    for text in ("0", "1", "0101101", "101101", "111"):
        assert parse_from_numeral_system(text, binary_digits) == int(text, 2)
def test_decimal_parsing():
    """Base-10 parsing agrees with int(x, 10)."""
    decimal_digits = "0123456789"
    for text in ("0", "1", "0123", "123", "456"):
        assert parse_from_numeral_system(text, decimal_digits) == int(text, 10)
|
[
"mmore500.login+gpg@gmail.com"
] |
mmore500.login+gpg@gmail.com
|
04d2d4b1fd96ad5507c4890234ccd8f5a3e0cd22
|
4d2a007fb3810d0b1de676d0193eb24cb0134180
|
/site.py
|
3d10e3e877d02cd1ca4193ab1643d66aed39797e
|
[] |
no_license
|
Rossonero/qlin
|
241a822402beb4b05fe69f024978148a5758a765
|
02cc017daf3e540cfd9b5bfe7e021c176d5e90e1
|
refs/heads/master
| 2021-01-18T00:26:45.747983
| 2012-02-13T01:43:12
| 2012-02-13T01:43:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
# -*- coding: utf-8 -*-
def Site_infor(request):
'''
站点信息展示前台
将所有信息展示开来
'''
t = get_template('site_infor.html')
#从数据库中将信息进行展示
cx = sq.connect('store/site.sqlite')
cu = self.cx.cursor()
#开始查找信息
strr = "select * from site"
cu.execute(strr)
print cu.fetchone()
html = t.render(Context({}))
return HttpResponse(html)
|
[
"superjom@gmail.com"
] |
superjom@gmail.com
|
aa3b3557c2779676f7b441397483f7569c7d4901
|
9ca6885d197aaf6869e2080901b361b034e4cc37
|
/ElectroWeakAnalysis/ZReco/python/zToTauTau_ETau_EventContent_AODSIM_cff.py
|
ff21d1bbfb2a6f565db5f6386bf52e4f0df33be0
|
[] |
no_license
|
ktf/cmssw-migration
|
153ff14346b20086f908a370029aa96575a2c51a
|
583340dd03481dff673a52a2075c8bb46fa22ac6
|
refs/heads/master
| 2020-07-25T15:37:45.528173
| 2013-07-11T04:54:56
| 2013-07-11T04:54:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContent_cff import *
from ElectroWeakAnalysis.ZReco.zToTauTau_ETau_EventContent_cff import *
# Event-content PSet for Z -> tau tau (e-tau channel) AODSIM output.
AODSIMZToTauTauETauEventContent = cms.PSet(
    outputCommands = cms.untracked.vstring()
)
# start from the standard AODSIM content, then append the e-tau collections
AODSIMZToTauTauETauEventContent.outputCommands.extend(AODSIMEventContent.outputCommands)
AODSIMZToTauTauETauEventContent.outputCommands.extend(zToTauTauETauEventContent.outputCommands)
|
[
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] |
sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch
|
3a2079455f632c92b4b8d209389e7bd7bd73ee00
|
b4a4fa30ae3f857e209356d1a28273b3cbbdc3e3
|
/api_basic/views.py
|
3b09cd7401a291568f6dc1dda8cbc8a97862ad2f
|
[] |
no_license
|
sahin88/Django_Rest_Framework_Account_Article_RestAPI
|
c0fd4b97eb5629c3b65781663dd82edc6f0de384
|
f5d7d12c04852392583f881f1e334d71c4582c4d
|
refs/heads/master
| 2023-03-25T19:02:17.427294
| 2020-04-21T09:26:51
| 2020-04-21T09:26:51
| 257,544,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,326
|
py
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse ,JsonResponse
from rest_framework.parsers import JSONParser
from .models import Article
from .serializers import ArticleSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework import mixins
from rest_framework import generics
from rest_framework.authentication import SessionAuthentication, BasicAuthentication,TokenAuthentication
from rest_framework.permissions import IsAuthenticated
class GenericAPIViews(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin, mixins.UpdateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin):
    """CRUD endpoint for Article assembled from the DRF generic mixins.

    Token-authenticated, login required; `id` is the URL lookup field.
    """
    serializer_class = ArticleSerializer
    queryset = Article.objects.all()
    lookup_field = 'id'
    # BUG FIX: authentication_classes was assigned twice; the first value
    # ([SessionAuthentication, BasicAuthentication]) was dead code, silently
    # overwritten by the second.  Only the effective value is kept.
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def get(self, request, id=None):
        # retrieve one article when an id is supplied, otherwise list all
        if id:
            return self.retrieve(request, id)
        else:
            return self.list(request)
    def post(self, request):
        return self.create(request)
    def put(self, request, id=None):
        return self.update(request, id)
    def delete(self, request, id=None):
        return self.destroy(request, id)
class ArticleView(APIView):
    """List all articles (GET) or create a new one (POST)."""
    def get(self, request):
        articles = Article.objects.all()
        serializer = ArticleSerializer(articles, many=True)
        return Response(serializer.data)
    def post(self, request):
        # request.data handles arbitrary payloads for POST/PUT/PATCH,
        # unlike request.POST which only parses form data on POST.
        serializer = ArticleSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        # BUG FIX: the original returned `serializer.error` -- no such
        # attribute (AttributeError at runtime); DRF exposes `errors`.
        return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class ArticleDetailView(APIView):
    """Retrieve, update or delete a single Article by primary key."""
    def get(self, request, pk):
        try:
            article = Article.objects.get(pk=pk)
        except Article.DoesNotExist:
            # BUG FIX: the original did HttpResponse(status.HTTP_404_NOT_FOUND),
            # which sends "404" as the response *body* with HTTP status 200;
            # the code must go in the `status` keyword.  Also narrowed the
            # bare `except:` to the exception actually expected here.
            return HttpResponse(status=status.HTTP_404_NOT_FOUND)
        serializer = ArticleSerializer(article)
        return Response(serializer.data)
    def put(self, request, pk):
        # NOTE(review): a missing pk raises Article.DoesNotExist (HTTP 500)
        # here and in delete() -- confirm whether a 404 is wanted, as in get().
        article = Article.objects.get(pk=pk)
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk):
        article = Article.objects.get(pk=pk)
        article.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Create your views here.
# @api_view(['GET', 'POST'])
# def article_list(request):
# if request.method=='GET':
# articles=Article.objects.all()
# serializer=ArticleSerializer(articles, many=True)
# return Response(serializer.data)
# elif request.method=='POST':
# serializer=ArticleSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# @api_view(['GET','PUT', 'DELETE'])
# @csrf_exempt
# def article_detail(request,pk):
# try:
# article=Article.objects.get(pk=pk)
# except:
# return HttpResponse(status.HTTP_404_NOT_FOUND)
# if request.method=='GET':
# serializer=ArticleSerializer(article)
# return Response(serializer.data)
# elif request.method=='PUT':
# #data=JSONParser().parse(request)
# serializer=ArticleSerializer(article, data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# elif request.method=='DELETE':
# article.delete()
# return Response(serializer.errors, status=status.HTTP_204_NO_CONTENT)
|
[
"sahinmuratogur@gmail.com"
] |
sahinmuratogur@gmail.com
|
141fb7da59e137862d2a34b048d6dee9113a4465
|
6e601105760f09d3c9f5306e18e4cf085f0bb4a2
|
/10000-99999/10868.py
|
f86c7f9e367022589178de78365c22587a18d80c
|
[] |
no_license
|
WSJI0/BOJ
|
6412f69fddd46c4bcc96377e2b6e013f3bb1b524
|
160d8c13f72d7da835d938686f433e7b245be682
|
refs/heads/master
| 2023-07-06T15:35:50.815021
| 2023-07-04T01:39:48
| 2023-07-04T01:39:48
| 199,650,520
| 2
| 0
| null | 2020-04-20T09:03:03
| 2019-07-30T12:48:37
|
Python
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
'''
10868번
최솟값
'''
import sys
input=sys.stdin.readline
def init(start, end, node):
    """Build the min-segment-tree node over a[start..end]; return that range's min.

    Uses the module globals `tree` (the segment tree) and `a` (the values).
    """
    if start==end:
        # leaf: copy the array value
        tree[node]=a[start]
        return tree[node]
    mid=(start+end)//2
    # internal node: min of its two children (2*node and 2*node+1)
    tree[node]=min(init(start, mid, node*2),init(mid+1, end, node*2+1))
    return tree[node]
def findMin(start, end, node, left, right):
    """Range-minimum query over a[left..right] on the global `tree`.

    `start`/`end` is the range this `node` covers; 9999999999 acts as +inf
    for disjoint ranges.
    """
    if left > end or right < start:
        return 9999999999
    if left <= start and end <= right:
        return tree[node]
    mid = (start + end) // 2
    left_min = findMin(start, mid, node * 2, left, right)
    right_min = findMin(mid + 1, end, node * 2 + 1, left, right)
    return min(left_min, right_min)
# n values followed by m range-minimum queries (BOJ 10868)
n, m=map(int, input().split())
a=[]
# 4*n slots is always enough for a segment tree over n leaves;
# 9999999999 serves as +infinity (values fit well below it)
tree=[9999999999]*(4*n)
for _ in range(n): a.append(int(input()))
init(0, n-1, 1)
for _ in range(m):
    # queries are 1-based inclusive; convert to 0-based indices
    s, e=map(int, input().split())
    print(findMin(0, n-1, 1, s-1, e-1))
|
[
"lifedev@naver.com"
] |
lifedev@naver.com
|
451f6e60c661a8a37b1823007d8f99a173a8ec2a
|
60139399c13b87c150573808d23c8f00542672a2
|
/admux/mixins/websites.py
|
4d3b238caf36bfa27f567f0d7fc5337f55b466db
|
[] |
no_license
|
adverserve/admux_client
|
d4781501b5e8486a072eb3dc6c19580292c517f8
|
1c633072e75d2f3be660565825a5f77c9699e68c
|
refs/heads/master
| 2021-01-10T20:29:45.695490
| 2013-10-24T14:52:21
| 2013-10-24T14:52:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
# coding: utf-8
from admux import helpers
class WebsitesClientMixin(object):
    """Client mixin exposing the /websites API endpoints."""

    def websites(self, links=None, expand=None):
        """List every website.

        http://admux-demo.trust-box.at/developer/api/v1/get/websites/

        links: Boolean
        expand: array of strings
        """
        return self._request(
            'GET',
            '/websites',
            params={
                'links': helpers._bool(links),
                'expand': helpers._list(expand),
            },
        )

    def website(self, uuid, links=None, expand=None):
        """Fetch a single website.

        http://admux-demo.trust-box.at/developer/api/v1/get/websites/uuid/

        uuid: website identifier
        links: Boolean
        expand: array of strings
        """
        return self._request(
            'GET',
            '/websites/%(uuid)s' % { 'uuid': uuid, },
            params={
                'links': helpers._bool(links),
                'expand': helpers._list(expand),
            },
        )
|
[
"stephan.poetschner@gmail.com"
] |
stephan.poetschner@gmail.com
|
d3a372d9cda192f974f6edac19afaf207d458e26
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02785/s474912270.py
|
3746381a7a22f2e9f041861db8ba3b5bb71bc13a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
import sys
input = sys.stdin.readline
def I(): return int(input())  # read one line as a single int
def MI(): return map(int, input().split())  # read one line as an int iterator
def LI(): return list(map(int, input().split()))  # read one line as an int list
def main():
    """Zero out the K largest values of h and print the sum of the rest.

    Reads N and K, then the list h, from stdin (via MI/LI).
    """
    # FIX: removed the unused local `mod = 10**9 + 7` (never referenced).
    N,K=MI()
    h=LI()
    h.sort()
    if K>=N:
        # K moves cover every element, nothing remains
        print(0)
        exit()
    # after the ascending sort the K largest sit at the tail; zero them
    for i in range(K):
        h[-1-i]=0
    ans=sum(h)
    print(ans)
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
34e433c211df58f96eb9e335278588ea0f8e6417
|
5ddb3d3095ae782671d4592549e1fec38b73eb84
|
/spacy/tests/pipeline/test_functions.py
|
fbb88ade26a6127552ab9bcb9f6ab40c4e39d3ef
|
[
"MIT"
] |
permissive
|
Ali-Tahir/spaCy
|
c1ea9c5389044bc1e1f0e30f176b9b6e60a10690
|
9e210fa7fdb8e376655e7a7ab7debd3ffd718a63
|
refs/heads/master
| 2020-08-30T02:27:16.538275
| 2019-10-28T22:59:03
| 2019-10-28T22:59:03
| 218,234,283
| 0
| 0
|
MIT
| 2020-02-09T06:47:46
| 2019-10-29T08:05:03
| null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from spacy.pipeline.functions import merge_subtokens
from ..util import get_doc
@pytest.fixture
def doc(en_tokenizer):
# fmt: off
text = "This is a sentence. This is another sentence. And a third."
heads = [1, 0, 1, -2, -3, 1, 0, 1, -2, -3, 1, 1, 1, 0]
deps = ["nsubj", "ROOT", "subtok", "attr", "punct", "nsubj", "ROOT",
"subtok", "attr", "punct", "subtok", "subtok", "subtok", "ROOT"]
# fmt: on
tokens = en_tokenizer(text)
return get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)
def test_merge_subtokens(doc):
doc = merge_subtokens(doc)
# get_doc() doesn't set spaces, so the result is "And a third ."
assert [t.text for t in doc] == ["This", "is", "a sentence", ".", "This", "is", "another sentence", ".", "And a third ."]
|
[
"ines@ines.io"
] |
ines@ines.io
|
eff19506d086bf10caf3a7ff7cc64b66606e87dd
|
888899f0cb3e6e7b28a9de39001a1fd1c177cd35
|
/COMPLETE PYTHON-3 COURSE/Chapter-19-READ_WRITE_CSV_FILES/read_write_csv_simultanously.py
|
4d67f080dfecfeb586057bc4ec3209854e177e35
|
[] |
no_license
|
VivakaNand/COMPLETE_PYTHON_3
|
ef162d71d3a44bf661fcc1a8aacce31e7953cd7c
|
b3b835afe7671fdc3d29d912650fd4ccd3bc83f6
|
refs/heads/master
| 2023-02-04T10:13:41.881939
| 2020-12-23T08:30:51
| 2020-12-23T08:30:51
| 323,839,528
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
# reader, DictReader
# writer, DictWriter
from csv import DictWriter, DictReader
with open('final.csv', 'r') as rf:
with open('file2.csv', 'w',newline='') as wf:
csv_reader = DictReader(rf)
csv_writer = DictWriter(wf, fieldnames=['first_name','last_name','age'])
csv_writer.writeheader()
for row in csv_reader:
fname,lname,age = row['firstname'],row['lastname'],row['age']
csv_writer.writerow({
'first_name': fname.upper(),
'last_name':lname.upper(),
'age':age
})
|
[
"vivekjetani83@gmail.com"
] |
vivekjetani83@gmail.com
|
1a7330518eff453cd30004ae2fb01e90bfe63fbe
|
80616a1bfacc2081d983c3d8ec5b3f55295a323b
|
/serial_coms_list.py
|
2bee4f29591ba132b7218896bc837498d8e321c6
|
[] |
no_license
|
choyai/kis-personal-space-bot
|
47a681b679b7b6caf2c3b10124603e4475e3b73b
|
8dbce0b6cd88a1b326a2e9e4c9b3cf5120a083ee
|
refs/heads/master
| 2020-06-07T11:19:19.921167
| 2019-06-24T07:14:15
| 2019-06-24T07:14:15
| 193,010,563
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,085
|
py
|
# from scipy.spatial import distance as dist
# from imutils import perspective
# from imutils import contours
import numpy as np
# import argparse
# import imutils
# import cv2
import serial
import time
import struct
# from InverseKinematics import *
# from Graph import astar
import pickle
from math import pi
from random import randint
countsPerMillimeter = (321 / 300 * 400) / (np.pi * 10)
countsPerMillimeter_z = (12 * 66) / (np.pi * 12)
# Connect to mcu
setPath = {
}
def autoConnect(baud, portName):
while(1):
try:
serialDevice = serial.Serial()
serialDevice.baudrate = baud
# serialDevice.parity = 'E'
serialDevice.port = portName
serialDevice.timeout = 1
serialDevice.rts = 0
serialDevice.dtr = 0
serialDevice.open()
print('connected to mcu')
return serialDevice
except:
print('connection failed')
pass
def sendCommand(command, ser):
ser.write(bytes(command))
# while(1):
# if ser.inWaiting() > 0:
# # data = ser.read(1)
# # print("data =", ord(data))
# response = ser.readline().decode('utf-8')
# print(response)
# # if response != 'received' or 'starting':
# # pass
# if response == 'resend':
# ser.write(bytes(command))
# elif response == 'done':
# return 1
# elif response == 'starting':
# break
# else:
# pass
# # return 0
# # break
# # else:
# # ser.write(bytes(command))
def setHome(ser):
buffer = [255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
checksum = 0
for i in buffer:
checksum += i
checksum = checksum % 256
buffer.append(checksum)
print('sending ')
print(buffer)
sendCommand(buffer, ser)
def setPosXY(x, y, ser):
buffer = [255, 255, 1]
# a = int(np.sqrt(2) / 2 * (y - x))
# b = int(np.sqrt(2) / 2 * (y + x))
print("x = " + str(x / countsPerMillimeter))
print("y = " + str(y / countsPerMillimeter))
# print("a = " + str(a))
# print("b = " + str(b))
x_sign = 0 if x >= 0 else 1
y_sign = 0 if y >= 0 else 1
buffer.append(x_sign)
buffer.extend(split_large_ints(abs(x)))
buffer.append(y_sign)
buffer.extend(split_large_ints(abs(y)))
checksum = 0
for i in buffer:
checksum += i
checksum = checksum % 256
buffer.append(checksum)
print('sending ')
print(buffer)
sendCommand(buffer, ser)
#
#
# def setPosXY_mm(x, y, ser, x_pix, y_pix, countsPerMillimeter=countsPerMillimeter):
# buffer = [255, 255, 1]
# x = x * countsPerMillimeter
# y = y * countsPerMillimeter
#
# a = int(np.sqrt(2) / 2 * (y - x))
# b = int(np.sqrt(2) / 2 * (y + x))
# print("x = " + str(x / countsPerMillimeter))
# print("y = " + str(y / countsPerMillimeter))
# print("a = " + str(a))
# print("b = " + str(b))
# a_sign = 0 if a >= 0 else 1
# b_sign = 0 if b >= 0 else 1
# buffer.extend(split_large_ints(abs(a)))
# buffer.extend(split_large_ints(abs(b)))
# buffer.extend([a_sign, b_sign])
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def setPosZ(z, ser):
# buffer = [255, 255, 2]
# buffer.extend(split_large_ints(z))
# buffer.extend([0, 0, 0, 0])
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def setPosZ_mm(z, ser, countsPerMillimeter_z):
# buffer = [255, 255, 2]
# z = int(z * countsPerMillimeter_z)
# buffer.extend(split_large_ints(z))
# buffer.extend([0, 0, 0, 0])
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def gripClose(ser):
# buffer = [255, 255, 3, 0, 0, 0, 0, 0, 0]
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def gripOpen(ser):
# buffer = [255, 255, 4, 0, 0, 0, 0, 0, 0]
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def gripHalf(duty, ser):
# buffer = [255, 255, 10, ]
# buffer.extend(split_large_ints(duty))
# buffer.extend([0, 0, 0, 0])
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def gripRotate(angle, ser):
# buffer = [255, 255, 5]
# buffer.extend(split_large_ints(angle))
# buffer.extend([0, 0, 0, 0])
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def setAGains(K_P, K_I, K_D, ser):
# buffer = [255, 255, 6]
# buffer.extend(split_floats(K_P))
# buffer.extend(split_floats(K_I))
# buffer.extend(split_floats(K_D))
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def setBGains(K_P, K_I, K_D, ser):
# buffer = [255, 255, 7]
# buffer.extend(split_floats(K_P))
# buffer.extend(split_floats(K_I))
# buffer.extend(split_floats(K_D))
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def setZGains(K_P, K_I, K_D, ser):
# buffer = [255, 255, 8]
# buffer.extend(split_floats(K_P))
# buffer.extend(split_floats(K_I))
# buffer.extend(split_floats(K_D))
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
#
#
# def setTolerances(ser):
# buffer = [255, 255, 9]
# tolerances = []
# for al in ['a', 'b', 'z']:
# while(1):
# try:
# tolerance = (int(input("set tolerance_" + al + ": ")))
# buffer.extend(split_large_ints(tolerance))
# break
# except:
# print("try again")
# checksum = 0
# for i in buffer:
# checksum += i
# checksum = checksum % 256
# buffer.append(checksum)
# print('sending ')
# print(buffer)
# sendCommand(buffer, ser)
# splits large ints into msb and lsb. Doesn't support ints larger than 16 bits
def split_large_ints(num):
# numstring = str(hex(num))
# lsB = '0x'
# msB = '0x'
# if len(numstring) < 5:
# msB = '0x00'
# else:
# if len(numstring) == 5:
# msB += numstring[2]
# else:
# msB = msB + numstring[len(numstring) - 4] + \
# numstring[len(numstring) - 3]
# if len(numstring) < 4:
# lsB += numstring[len(numstring) - 1]
# else:
# lsB = lsB + numstring[len(numstring) - 2] + \
# numstring[len(numstring) - 1]
msB = (num // 256) % 256
lsB = num % 256
return [msB, lsB]
# splits floats from their decimals and turns them into ints
def split_floats(num):
a, b = divmod(num, 1.0)
a = int(a) % 256
b = int(b * 256)
return [a, b]
|
[
"choyaichaiyo@gmail.com"
] |
choyaichaiyo@gmail.com
|
1de6c30ae80ba4c72a4ba8bcee65ddbea304c734
|
747142c31420254647caf85cc8532f4d13b00146
|
/django-web/bare-bones-web/adoptions/adoptions/models.py
|
96eccf0f66d1489ffac826247486abafdc94a5ca
|
[] |
no_license
|
jankidepala/machine-learning-IOT
|
fdf5f9b9c767405a940b1963d5f68b219e437c51
|
125bac2a3a023779e79f89dcf4b7304e6e2601ae
|
refs/heads/master
| 2022-11-19T05:35:50.180173
| 2020-07-25T08:15:28
| 2020-07-25T08:15:28
| 107,033,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
from django.db import models
# Create your models here.
class Pet(models.Model):
name = models.CharField(max_length=101)
submitter = models.CharField(max_length=101)
species = models.CharField(max_length=30)
breed = models.CharField(max_length=30, blank=True)
name = models.CharField(max_length=101)
submission_date = models.DateTimeField()
vaccinations = models.ManyToManyField('Vaccine', blank=True)
class Vaccine(models.Model):
name = models.CharField(max_length=50)
|
[
"you@example.com"
] |
you@example.com
|
140216cf494403c143fccbc18bd45cf02c208cdb
|
789a540bbb79c334cbeaf3687876bfd939e4290b
|
/app/handlers/private/admin/message/menu/message_distribution/distribution_options/wait_confirm_activate_schedule.py
|
d89fe3846c47add5b6d184ed3e7278c4db191b2a
|
[] |
no_license
|
ExissBrr/TRIGON-GARANT-BOT
|
2cc96f5f6f195f4e76c164db4f8acafbfa5b7662
|
812acf060eb92e6fad21568a75e6dba7ce0da4d9
|
refs/heads/main
| 2023-07-04T18:22:43.507453
| 2021-08-17T14:51:30
| 2021-08-17T14:51:30
| 392,725,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
from aiogram.dispatcher import FSMContext
from aiogram.types import Message
from app.data import text
from app.loader import dp
from app.states.private.message_distribution import MessageSendingStates
from app.utils.bot import send_main_keyboard
from app.utils.db_api.models.messages_for_sending import MessageForSending
@dp.message_handler(state=MessageSendingStates.wait_confirm_activate_schedule)
async def delete_schedule(message: Message, lang_code, state: FSMContext, user, state_data: dict):
schedule_id = int(state_data.get('message_id'))
schedule_message = await MessageForSending.get(schedule_id)
await schedule_message.update_data(is_active=True)
await message.answer(
text=text[lang_code].admin.message.schedule_was_activated
)
await send_main_keyboard(user, state)
|
[
"vladbelykh2002@gmail.com"
] |
vladbelykh2002@gmail.com
|
faded2f996b520e49df380a399e225a2a20919d5
|
f34219ce57fbe6b446fb4f1aa966f136a9b3bf42
|
/ex26.py
|
6dc076019fe108fa44d2e7b17af7da7080d86319
|
[] |
no_license
|
DikranHachikyan/python-programming-20200323
|
467f60c07ac47e4146e63967eeb1acb76b8dd21c
|
e6d6185561969fc478caaedcd0981476d56d5c5b
|
refs/heads/master
| 2021-04-20T09:52:25.151484
| 2020-04-09T07:24:33
| 2020-04-09T07:24:33
| 249,673,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# 1. декларация
def addNumbers(a, b, c = None):
res = 0
if c:
res = a + b + c
else:
res = a + b
return res
if __name__ == '__main__':
# 2. извикване
x, y = 7, 8
r = addNumbers(x,y)
print(f'{x} + {y} = {r}')
z = 10
r = addNumbers(x, y, z)
print(f'{x} + {y} + {z} = {r}')
|
[
"dhachikian@expert-bg.org"
] |
dhachikian@expert-bg.org
|
01987f0ad902f03e4f8143a9902372ec5a23fdac
|
100802fd56febbe28e11d45802e0ad661a9b98c4
|
/Community/migrations/0025_auto_20170917_1324.py
|
b00847fda82521674abe2ec4ce8c57358fab7431
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ByrdOfAFeather/AlphaTrion
|
efc4cfcfa246adde5a0ce84eb5e295c0c61722f5
|
90b00b8f4e4c7fe3c495a5ded14b47c3210119ea
|
refs/heads/master
| 2021-01-22T07:35:43.403968
| 2017-12-03T15:50:41
| 2017-12-03T15:50:41
| 102,306,857
| 0
| 2
| null | 2017-11-12T18:32:35
| 2017-09-04T01:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 502
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-17 17:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Community', '0024_merge_20170906_2028'),
]
operations = [
migrations.AlterModelOptions(
name='communitypacingratings',
options={'verbose_name': 'Community Pacing Ratings', 'verbose_name_plural': 'Community Pacing Ratings'},
),
]
|
[
"matthew_a_byrd@outlook.com"
] |
matthew_a_byrd@outlook.com
|
8f4684429d545ecf7164585be6213989c7b40f50
|
15581a76b36eab6062e71d4e5641cdfaf768b697
|
/Topics/Bit Manipulation/Hamming Distance.py
|
0f20d3169bb1b5a3f103505b9745db2a91f036ec
|
[] |
no_license
|
MarianDanaila/Competitive-Programming
|
dd61298cc02ca3556ebc3394e8d635b57f58b4d2
|
3c5a662e931a5aa1934fba74b249bce65a5d75e2
|
refs/heads/master
| 2023-05-25T20:03:18.468713
| 2023-05-16T21:45:08
| 2023-05-16T21:45:08
| 254,296,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
# Approach 1: Just check every bit in both numbers and increment when they are different
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
while x != 0 or y != 0:
if x % 2 != y % 2:
hamming_distance += 1
x = x >> 1
y = y >> 1
return hamming_distance
# Approach 2: Just make XOR of x and y and after that count the number of '1' bits.
# because XOR of two different bits is always 1
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
new = x ^ y
while new > 0:
if new % 2 == 1:
hamming_distance += 1
new = new >> 1
return hamming_distance
# Approach 3: Again make XOR of x and y but when we count the number of '1' bits
# we make the trick n&(n-1) which removes last '1' bit
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
new = x ^ y
while new > 0:
new = new & (new-1)
hamming_distance += 1
return hamming_distance
|
[
"mariandanaila01@gmail.com"
] |
mariandanaila01@gmail.com
|
81043998a660fac47d3cc846d5dcfef5ca7aba85
|
1ef56dcfef70ee14df8956eedd171f74406594af
|
/cms/dal/content_dal.py
|
6377504eafc1dd0140adba7c9c999684394a5e50
|
[] |
no_license
|
journeyends/webtest
|
6c54ff19e01cd0cd99a34bcae55dd5701abf132f
|
2a24c6d7c52aa627edfbba3dd5eb9ccc16abe9fb
|
refs/heads/master
| 2020-04-17T07:38:52.873722
| 2019-03-21T09:36:50
| 2019-03-21T09:36:50
| 166,378,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
from cms.entity.content_entity import ContentModel
class ContentDal:
def getListByCondition(self, channelId, categoryId):
search_dict = dict()
search_dict['is_on'] = 1
if channelId is not None and int(channelId) > 0:
search_dict['channel_id'] = int(channelId)
if categoryId is not None and int(categoryId) > 0:
search_dict['category_id'] = int(categoryId)
obj = ContentModel.objects.filter(**search_dict) \
.values('id', 'title', 'channel_id', 'category_id')
return obj
def getById(self, id):
obj = ContentModel.objects.filter(id=id).first()
return obj
|
[
"songliyang@goldmantis.com"
] |
songliyang@goldmantis.com
|
995de2292dda0406dc843356accff4f284d58da4
|
5ade44090b99ba19edd5cc0b07e4ebf1f8cc416e
|
/introduction.to.programming.with.turtle/5-1-1.45Tree.py
|
f06665fd7736a2cfefeee5d89c8d7619611ac0e3
|
[
"MIT"
] |
permissive
|
Mito445/programming_with_python
|
af2f201fc3f13cab88fdaa708ecda6da05ad1b46
|
1114eaa7432ea8581d880bcebd1813a9fb873cce
|
refs/heads/master
| 2020-05-27T13:00:53.085344
| 2019-05-21T07:59:18
| 2019-05-21T07:59:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from easygraphics.turtle import *
def branch(len, level):
if level == 0:
return
fd(len)
lt(45)
branch(len / 2, level - 1)
rt(90)
branch(len / 2, level - 1)
lt(45)
bk(len)
create_world(800, 600)
set_speed(100)
pu()
bk(200)
pd()
branch(200, 5)
pause()
close_world()
|
[
"royqh1979@gmail.com"
] |
royqh1979@gmail.com
|
f063629eadd4d9ceba806ccfa8b53d59a11e2640
|
1a639d185f9c883b7bebf33c577c58b22ac93c7e
|
/tkinter_learn/54_bind_multiple.py
|
bfd735893e6af94287c8c1d035feacf70951cf2d
|
[] |
no_license
|
gofr1/python-learning
|
bd09da5b5850b1533a88b858690ed4380b55d33e
|
19343c985f368770dc01ce415506506d62a23285
|
refs/heads/master
| 2023-09-02T15:42:27.442735
| 2021-11-12T10:17:13
| 2021-11-12T10:17:13
| 237,828,887
| 0
| 0
| null | 2021-11-12T10:17:14
| 2020-02-02T20:03:42
|
Python
|
UTF-8
|
Python
| false
| false
| 696
|
py
|
from tkinter import Tk, mainloop
from tkinter import ttk
root = Tk()
label0 = ttk.Label(root, text='Label 0')
label1 = ttk.Label(root, text='Label 1')
label0.pack()
label1.pack()
label0.bind('<ButtonPress>', lambda e: print('<ButtonPress> Label'))
label0.bind('<1>', lambda e: print('<1> Label'))
root.bind('<1>', lambda e: print('<1> Root'))
# in that case when left button click is binded both on label and root window
# now when you click on label0 both events will occur:
# <1> Label
# <1> Root
# so lets unbind left-click event from label
label0.unbind('<1>')
# now we will have
# <ButtonPress> Label
# <1> Root
root.bind_all('<Escape>', lambda e: print('Escape!!!'))
root.mainloop()
|
[
"gofr.one@gmail.com"
] |
gofr.one@gmail.com
|
22c0cf9ad666b05c4b4de4efde05cbcbc5637ef7
|
32cb84dd41e4be24c065bb205f226f9b121a6db2
|
/swimmingpool/apps.py
|
858653df0fdd5d3f7907fc3a24965f943afd53a9
|
[] |
no_license
|
InformatykaNaStart/staszic-sio2
|
b38fda84bd8908472edb2097774838ceed08fcfa
|
60a127e687ef8216d2ba53f9f03cfaa201c59e26
|
refs/heads/master
| 2022-06-29T11:09:28.765166
| 2022-06-13T21:56:19
| 2022-06-13T21:56:19
| 115,637,960
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class SwimmingpoolConfig(AppConfig):
name = 'swimmingpool'
|
[
"hugo@staszic.waw.pl"
] |
hugo@staszic.waw.pl
|
4791c02757534b823afff98e8e1f831eef411ee8
|
0cad32e2abd77ba43150b6e6a5f3a056f5d0cb13
|
/rec_test/spark_test2.py
|
93e887aa7a7d7ee6e6a5cb6a88eb79beec3a727a
|
[] |
no_license
|
luohuayong/spark
|
73cf3720e588cf0c6a1d2f9f6c05387ac8d45295
|
1ff2180ac132405796daa8e5664ec4272f64a2f6
|
refs/heads/master
| 2020-04-06T06:53:58.201572
| 2016-08-24T11:21:24
| 2016-08-24T11:21:24
| 63,468,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
from pyspark.mllib.recommendation import ALS,Rating
from pyspark import SparkContext
from pyspark.sql import SQLContext,Row
import os
os.environ['SPARK_CLASSPATH'] = "/home/leo/spark/lib/postgresql-9.3-1103.jdbc41.jar"
sc = SparkContext("local[2]","first spark app")
sqlContext = SQLContext(sc)
url = "jdbc:postgresql://localhost/sparktest?user=leo&password=123123"
data = sqlContext.load(source="jdbc",url=url,dbtable="public.user_rating")
print data.first()
ratings = data.map(lambda x:Rating(int(x[0]),int(x[1]),float(x[2])))
#print ratings.first()
model = ALS.train(ratings,50)
#features = model.userFeatures()
#print features.take(2)
predict = model.predict(2,2)
print predict
top = model.recommendProducts(2,10)
print top
|
[
"luohuayong@163.com"
] |
luohuayong@163.com
|
2f45c288c1a0d41d6ff5a1c1d40c94d3c9c32698
|
2e86ab87cb86ae4ffed162bc3e1109da7b362915
|
/supervisor/dbus/rauc.py
|
0a29112edaff028d4afed4370813fd5ff60e637c
|
[
"Apache-2.0"
] |
permissive
|
swipswaps/supervisor
|
c5cae4ecad4d9bc53191c3882f4337d3edc5219a
|
2d294f68418fbbcdbad776cdd9c614ef952074c4
|
refs/heads/main
| 2023-01-24T13:18:51.463358
| 2020-11-30T17:00:12
| 2020-11-30T17:00:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
"""D-Bus interface for rauc."""
import logging
from typing import Optional
from ..exceptions import DBusError, DBusInterfaceError
from ..utils.gdbus import DBus
from .const import (
DBUS_ATTR_BOOT_SLOT,
DBUS_ATTR_COMPATIBLE,
DBUS_ATTR_LAST_ERROR,
DBUS_ATTR_OPERATION,
DBUS_ATTR_VARIANT,
DBUS_NAME_RAUC,
DBUS_NAME_RAUC_INSTALLER,
DBUS_NAME_RAUC_INSTALLER_COMPLETED,
DBUS_OBJECT_BASE,
RaucState,
)
from .interface import DBusInterface
from .utils import dbus_connected
_LOGGER: logging.Logger = logging.getLogger(__name__)
class Rauc(DBusInterface):
"""Handle D-Bus interface for rauc."""
def __init__(self):
"""Initialize Properties."""
self._operation: Optional[str] = None
self._last_error: Optional[str] = None
self._compatible: Optional[str] = None
self._variant: Optional[str] = None
self._boot_slot: Optional[str] = None
async def connect(self):
"""Connect to D-Bus."""
try:
self.dbus = await DBus.connect(DBUS_NAME_RAUC, DBUS_OBJECT_BASE)
except DBusError:
_LOGGER.warning("Can't connect to rauc")
except DBusInterfaceError:
_LOGGER.warning("Host has no rauc support. OTA updates have been disabled.")
@property
def operation(self) -> Optional[str]:
"""Return the current (global) operation."""
return self._operation
@property
def last_error(self) -> Optional[str]:
"""Return the last message of the last error that occurred."""
return self._last_error
@property
def compatible(self) -> Optional[str]:
"""Return the system compatible string."""
return self._compatible
@property
def variant(self) -> Optional[str]:
"""Return the system variant string."""
return self._variant
@property
def boot_slot(self) -> Optional[str]:
"""Return the used boot slot."""
return self._boot_slot
@dbus_connected
def install(self, raucb_file):
"""Install rauc bundle file.
Return a coroutine.
"""
return self.dbus.Installer.Install(raucb_file)
@dbus_connected
def get_slot_status(self):
"""Get slot status.
Return a coroutine.
"""
return self.dbus.Installer.GetSlotStatus()
@dbus_connected
def signal_completed(self):
"""Return a signal wrapper for completed signal.
Return a coroutine.
"""
return self.dbus.wait_signal(DBUS_NAME_RAUC_INSTALLER_COMPLETED)
@dbus_connected
def mark(self, state: RaucState, slot_identifier: str):
"""Get slot status.
Return a coroutine.
"""
return self.dbus.Installer.Mark(state, slot_identifier)
@dbus_connected
async def update(self):
"""Update Properties."""
data = await self.dbus.get_properties(DBUS_NAME_RAUC_INSTALLER)
if not data:
_LOGGER.warning("Can't get properties for rauc")
return
self._operation = data.get(DBUS_ATTR_OPERATION)
self._last_error = data.get(DBUS_ATTR_LAST_ERROR)
self._compatible = data.get(DBUS_ATTR_COMPATIBLE)
self._variant = data.get(DBUS_ATTR_VARIANT)
self._boot_slot = data.get(DBUS_ATTR_BOOT_SLOT)
|
[
"noreply@github.com"
] |
swipswaps.noreply@github.com
|
8882c1a68a42f5b8e201b5424ed071c14c1c054b
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/6737282/snippet.py
|
5c2d162fb0a8a82270d9ca2decf243da38c25c92
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 638
|
py
|
"""
Installs your SSH key on other hosts. A fabfile for lazy people.
"""
from fabric.api import task, run, put, env, cd
# Use sh instead of bash.
env.shell = '/bin/sh -l -c'
@task
def add_ssh_key(identity='~/.ssh/id_rsa.pub'):
# Copy the key over.
REMOTE_PATH = '~/id.pub'
put(identity, REMOTE_PATH)
with cd('~'):
# Make sure the SSH directory is created.
run('mkdir -p .ssh')
# And append to the authrized keys.
run('cat %(REMOTE_PATH)s >> ~/.ssh/authorized_keys' % locals())
# Be thourough and leave no trace of this interaction!
run('rm %(REMOTE_PATH)s' % locals())
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
a13e095a11f32454452ddb7811aeced0983a69bb
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R2/benchmark/startQiskit269.py
|
0d004daf589a46c1043373aa682a4e2930b9c167
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,721
|
py
|
# qubit number=3
# total number=54
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.x(input_qubit[2]) # number=46
prog.cx(input_qubit[0],input_qubit[2]) # number=47
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.cx(input_qubit[0],input_qubit[2]) # number=33
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit269.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
c93c551d735e66aaea519dea5549a52276049c22
|
cba54fcdf5333aec90a70a6495f962f1c34818e9
|
/Data/ColorLUT/GenerateDefault.py
|
f2b8d3b2f8c34cee19657f30bf1e1ade794f1c65
|
[
"WTFPL"
] |
permissive
|
2lost4u/RenderPipeline
|
f89fcb1b40e6fd5e5d001b311c8d6c8048a05c13
|
12131b115775f97927633d71832af65b99eebd09
|
refs/heads/master
| 2020-04-30T12:36:43.617264
| 2015-10-03T15:39:22
| 2015-10-03T15:39:22
| 44,781,091
| 1
| 0
| null | 2015-10-23T00:23:48
| 2015-10-23T00:23:47
| null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from panda3d.core import PNMImage, Vec3
# Generate the identity color lookup table: a 32^3 LUT packed into a
# (32*32) x 32 image, 16-bit per channel.  Python 2 script (xrange).
lutSize = 32
image = PNMImage(lutSize * lutSize, lutSize, 3, 2**16 - 1)
for r in xrange(lutSize):
    for g in xrange(lutSize):
        for b in xrange(lutSize):
            # Blue slices are laid out side by side along the x axis.
            image.setXel(r + b * lutSize, g, r / float(lutSize), g / float(lutSize), b / float(lutSize))
image.write("Default.png")
|
[
"tobias.springer1@googlemail.com"
] |
tobias.springer1@googlemail.com
|
b812bca292f39c58ce9c994d00b04ae5e4ff59e6
|
4a2aed648b3890e51732ac7d9ceaaeabfd985f99
|
/Linked list/Link2Doubly..py
|
aca4d43528d5c3464374a148787f7ce4e5e42eb1
|
[] |
no_license
|
bhatnagaranshika02/Data-Structures-Python
|
f544adeebc8af3b313354699dddd85af59889f4c
|
74de8a1c895b6ce18094d6c50beaee15a2715404
|
refs/heads/master
| 2023-03-13T06:47:20.247213
| 2021-03-01T13:58:53
| 2021-03-01T13:58:53
| 258,513,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,565
|
py
|
class Node:
    """A doubly linked list node.

    `prev`/`next` default to 0 (not None) to match the callers below.
    """
    def __init__(self, data, prev=0, next=0):
        # Payload plus the two neighbour references.
        self.data, self.prev, self.next = data, prev, next
class linked_list:
    # Doubly linked list driven by `start` (head pointer).
    def __init__ (self):
        self.start=None
    # Insert a new node at the head.
    # NOTE(review): when the list is non-empty this fixes the old head's
    # `prev` pointer but never re-points self.start at the new node, so the
    # insertion is lost -- verify intended behaviour (the driver below only
    # calls addfront on an empty list, which happens to work).
    def addfront(self,data):
        new_node=Node(data)
        new_node.prev=None
        new_node.next=self.start
        if self.start is not None:
            self.start.prev=new_node
        else:
            self.start=new_node
    # Drop the head node.  NOTE(review): the `data` argument is ignored.
    def deletefront(self,data):
        if self.start==None:
            print("List is empty")
        else:
            self.start=self.start.next
    # Unlink the first node whose data equals node.data.
    # NOTE(review): assumes the match is an interior node; temp.next and
    # temp.prev are dereferenced with no None checks.
    def delete(self,node):
        temp=self.start
        while temp:
            if temp.data==node.data:
                prevs=temp.prev
                temp.next.prev=prevs
                prevs.next=temp.next
                break
            else:
                temp=temp.next
    # Remove duplicate values, keeping the first occurrence of each value.
    # NOTE(review): delete() unlinks the *first* node carrying the value,
    # not necessarily the duplicate just visited -- works only because all
    # earlier copies are equal by value.
    def removedupli(self):
        temp=self.start
        l=[]
        while temp:
            if temp.data not in l:
                l.append(temp.data)
                temp=temp.next
            else:
                nxt=temp.next
                self.delete(temp)
                temp=nxt
    # Insert a node before the first node holding `beforewhat`.
    # NOTE(review): only `next` pointers are rewired; `prev` links are left
    # stale, so backward traversal is unreliable after this call.
    def addbefore(self,data,beforewhat):
        new_node=Node(data)
        if self.start==None:
            print("List is empty")
        else:
            temp=self.start
            before=temp
            while temp!=None:
                if temp.data!=beforewhat:
                    before=temp
                    temp=temp.next
                else:
                    before.next=new_node
                    new_node.next=temp
                    break
    # Remove the node before the first node holding `beforewhat`,
    # using the (possibly stale) `prev` links.
    def deletebefore(self,beforewhat):
        if self.start==None or self.start==beforewhat:
            print("Cant delete")
        else:
            temp=self.start
            while temp!=None:
                if temp.data==beforewhat:
                    if temp.prev==self.start:
                        self.start=temp
                    else:
                        prevs=temp.prev.prev
                        temp.prev=prevs
                    break
                else:
                    temp=temp.next
    # Insert a node after the first node holding `addafter`.
    def addafter(self,data,addafter):
        new_node=Node(data)
        temp=self.start
        while temp!=None:
            if temp.data==addafter:
                temp2=temp.next
                temp.next=new_node
                new_node.next=temp2
                new_node.prev=temp
                break
            else:
                temp=temp.next
    # Remove the node after the first node holding `afterwhat`.
    def deleteafter(self,afterwhat):
        if self.start==None or self.start==afterwhat:
            print("Cant delete")
        else:
            temp=self.start
            while temp.next!=None:
                if temp.data==afterwhat:
                    nexts=temp.next.next
                    temp.next=nexts
                    break
                else:
                    temp=temp.next
    # Append a node at the tail (full walk).
    # NOTE(review): silently does nothing on an empty list.
    def addlast(self,data):
        new_node=Node(data)
        temp=self.start
        while temp!=None:
            if temp.next==None:
                temp.next=new_node
                new_node.prev=temp
                new_node.next=None
                break
            else:
                temp=temp.next
    # Print every value front to back on one line.
    def traverse(self):
        if self.start==None:
            print("List is empty")
        else:
            temp=self.start
            while temp!=None:
                print(temp.data,end=' ')
                temp=temp.next
            print("\n")
    # Print the number of nodes.
    def count(self):
        if self.start==None:
            print("List is empty")
        else:
            temp=self.start
            count=0
            while temp!=None:
                count+=1
                temp=temp.next
            print("Total count is:",count)
# Ad-hoc smoke test: exercises insert/delete/traverse/dedup on a small list.
obj=linked_list()
obj.count()
obj.addfront(6)
obj.addafter(7,6)
obj.addafter(8,7)
obj.addafter(9,8)
obj.traverse()
obj.addbefore(88,8)
obj.traverse()
obj.deleteafter(88)
obj.traverse()
obj.deletebefore(7)
obj.traverse()
obj.deleteafter(7)
obj.traverse()
obj.deletebefore(9)
obj.traverse()
obj.addafter(10,9)
obj.addafter(11,10)
obj.addafter(11,11)
obj.traverse()
obj.removedupli()
obj.traverse()
|
[
"bhatnagaranshika02@gmail.com"
] |
bhatnagaranshika02@gmail.com
|
52373f03597ca24ce183d53200144aa87abed7a4
|
9cda2257468d0ef2f7706d4d07099a7b6d897f02
|
/letecode/121-240/121-144/139.py
|
356b9d3a4011116e168da519c86020449cab5cc9
|
[] |
no_license
|
hshrimp/letecode_for_me
|
4ba4031803687d7a309da9af4f003a328b48e53e
|
6dc5b8968b6bef0186d3806e4aa35ee7b5d75ff2
|
refs/heads/master
| 2021-12-11T07:37:22.323713
| 2021-08-23T08:16:00
| 2021-08-23T08:16:00
| 204,437,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,962
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: wushaohong
@time: 2020-05-27 11:25
"""
"""139. 单词拆分
给定一个非空字符串 s 和一个包含非空单词列表的字典 wordDict,判定 s 是否可以被空格拆分为一个或多个在字典中出现的单词。
说明:
拆分时可以重复使用字典中的单词。
你可以假设字典中没有重复的单词。
示例 1:
输入: s = "leetcode", wordDict = ["leet", "code"]
输出: true
解释: 返回 true 因为 "leetcode" 可以被拆分成 "leet code"。
示例 2:
输入: s = "applepenapple", wordDict = ["apple", "pen"]
输出: true
解释: 返回 true 因为 "applepenapple" 可以被拆分成 "apple pen apple"。
注意你可以重复使用字典中的单词。
示例 3:
输入: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"]
输出: false"""
class Solution:
    def wordBreak(self, s: str, wordDict) -> bool:
        """Return True if s can be split into one or more words from wordDict.

        Iterative DFS over suffixes of s.  A `seen` set skips suffixes that
        were already explored -- the original version had no such check and
        blew up exponentially on inputs like "aaa...ab" with dict
        ["a", "aa", ...].  Words may be reused.
        """
        seen = set()
        stack = [s]
        while stack:
            rest = stack.pop()
            if rest in wordDict:
                return True
            if rest in seen:
                continue  # this suffix was already expanded
            seen.add(rest)
            # Try every proper prefix of `rest` that is a dictionary word.
            for i in range(len(rest) - 1, 0, -1):
                if rest[:i] in wordDict:
                    stack.append(rest[i:])
        return False

    def wordBreak2(self, s: str, wordDict) -> bool:
        """DP variant: dp[i] is True iff s[:i] is segmentable. O(n^2) substrings."""
        n = len(s)
        dp = [False] * (n + 1)
        dp[0] = True  # empty prefix is trivially segmentable
        for i in range(n):
            for j in range(i + 1, n + 1):
                if dp[i] and s[i:j] in wordDict:
                    dp[j] = True
        return dp[-1]
if __name__ == '__main__':
    sol = Solution()
    # NOTE(review): the first s/wordDict pair (the adversarial all-'a' case)
    # is immediately overwritten below, so it is dead code kept for reference.
    s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    wordDict = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"]
    s = "leetcode"
    wordDict = ["leet", "code"]
    print(sol.wordBreak2(s, wordDict))
|
[
"wushaohong@zhongan.com"
] |
wushaohong@zhongan.com
|
b04bd6e4fc179a3156752e3180ad68f515dc2426
|
d52cb4c2e880875944b14da0b8a9542235942ac8
|
/geeksforgeeks/strings/13_remove_common_characters_and_concatenate.py
|
f25b216069ea2a31f0095869d2573c87de6a3fb4
|
[] |
no_license
|
saparia-data/data_structure
|
fbd61535b68f92143b2cb2679377c0f56f424670
|
2e8700cfdaeefe0093e5b4fb2704b1abcd300d02
|
refs/heads/master
| 2023-05-08T18:54:52.250941
| 2021-06-04T05:44:29
| 2021-06-04T05:44:29
| 296,071,146
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,919
|
py
|
'''
Given two strings s1 and s2. Modify string s1 such that all the common characters of s1 and s2 is to be removed
and the uncommon characters of s1 and s2 is to be concatenated.
Note: If no modification is possible print -1.
Input:
The first line consists of an integer T i.e number of test cases.
The first line of each test case consists of a string s1.The next line consists of a string s2.
Output:
Print the concatenated string.
User Task:
The task is to complete the function concatenatedString() which removes the common characters, concatenates, and returns the string.
Constraints:
1 <= T <= 200
1 <= |Length of Strings| <= 104
Example:
Input:
2
aacdb
gafd
abcs
cxzca
Output:
cbgf
bsxz
Explanation:
Testcase 1:The common characters of s1 and s2 are: a, d.The uncommon characters of s1 and s2 are: c, b, g and f.
Thus the modified string with uncommon characters concatenated is: cbgf.
Testcase 2: The common characters of s1 and s2 are: a,c . The uncommon characters of s1 and s2 are: b,s,x and z.
Thus the modified string with uncommon characters concatenated is: bsxz.
hints:
1)
The idea is to use an array of size 26 (or a hash) where key is character and value is number of strings in which character is present.
If a character is present in one string, then count is 1, else if character is present in both strings, count is 2.
2)
-Initialize result as empty string.
-Push all characters of 2nd string in map with count as 1.
-Traverse first string and append all those characters to result that are not present in map. Characters that are present in map, make count 2.
-Traverse second string and append all those characters to result whose count is 1.
'''
def concatenatedString(s, p):
    """Concatenate the chars of s not present in p, then the chars of p not in s.

    Order within each string is preserved and duplicates are kept.
    Returns -1 (an int, per the problem statement) when every character is
    common to both strings.

    Membership is tested against sets built once, making this
    O(len(s) + len(p)) instead of the original O(len(s) * len(p)) scans of
    the raw strings; the result is assembled with str.join instead of
    repeated concatenation.
    """
    chars_in_p = set(p)
    chars_in_s = set(s)
    kept = [ch for ch in s if ch not in chars_in_p]
    kept += [ch for ch in p if ch not in chars_in_s]
    return "".join(kept) if kept else -1
#Another method
def concatenatedString1(s, p):
    """Remove common characters of s and p and concatenate the leftovers.

    Counting variant: per-character occurrence tables of size 256 (ASCII)
    are filled for each string, then characters whose counterpart count is
    zero are kept, s-part first.  Returns -1 when nothing survives.
    """
    seen_in_s = [0] * 256
    seen_in_p = [0] * 256
    # Tally how often each character code appears in each string.
    for ch in s:
        seen_in_s[ord(ch)] += 1
    for ch in p:
        seen_in_p[ord(ch)] += 1
    # Keep s-characters absent from p, then p-characters absent from s.
    pieces = []
    for ch in s:
        if seen_in_p[ord(ch)] == 0:
            pieces.append(ch)
    for ch in p:
        if seen_in_s[ord(ch)] == 0:
            pieces.append(ch)
    if pieces:
        return "".join(pieces)
    return -1
# Quick check with the second sample testcase; both variants should print bsxz.
s = "abcs"
p = "cxzca"
print(concatenatedString(s, p))
print(concatenatedString1(s, p))
|
[
"saparia.ashvin@tavant.com"
] |
saparia.ashvin@tavant.com
|
2706d20d8b42846ef0a92769ab0e1862cc9f67d6
|
7a11b6d4063685cb08b074ac8d08ab6e1d045ff5
|
/src/11_list_filter_map.py
|
fe22c07e7451ee77e9be1a6eb40d958c54a0fd5e
|
[] |
no_license
|
slavaider/python
|
8a9f5769bd519e0e270c5814ef46ec5c653ab7c1
|
f98896b8e9dd93fe7d2b4a495b67704ef5f08373
|
refs/heads/master
| 2023-03-02T15:12:56.218871
| 2021-02-07T16:20:08
| 2021-02-07T16:20:32
| 301,493,207
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
def square(number):
    """Return the square of `number`."""
    return number ** 2
def is_a(age): return age > 18
if __name__ == '__main__':
    # map: apply square to every element.
    list1 = [1, 2, 3, 4, 5]
    list2 = list(map(square, list1))
    print(list2)
    # filter: same predicate expressed as a lambda...
    l1 = [10, 12, 19, 20]
    l2 = list(filter(lambda age: age > 18, l1))
    print(l2)
    # ...and as a list comprehension using the named predicate.
    l2 = [i for i in l1 if is_a(i)]
    print(l2)
|
[
"slavaider1@gmail.com"
] |
slavaider1@gmail.com
|
230d7556304b6d1e9e84253b384f89a7032e7e7c
|
04ea3d2a060151fc21179ca28373bfa516adcbfe
|
/deadly_corridor_scenario/data_preprocess.py
|
80aad30a1cf5cd87fb70ff51165b4c1f57794052
|
[] |
no_license
|
zhouziqunzzq/vizdoom-naive-agent
|
394bdea6f88272603a5e6bf8b012e719d6f24701
|
4406b280567eef169d448fa77266812094aea736
|
refs/heads/main
| 2023-03-04T19:21:13.129770
| 2021-02-06T11:27:21
| 2021-02-06T11:27:21
| 333,699,839
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,593
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : data_preprocess.py
# @Author: harry
# @Date : 1/27/21 7:05 PM
# @Desc : Data preprocessor of raw play data
import numpy as np
import glob
import os
import pickle
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from constants import *
from typing import Any, List, Tuple, Optional
def load_raw_data(path: str) -> List[Tuple['np.array', List[float], float]]:
    """Load every ``*.pkl`` play-data file under *path* and merge the records.

    Each pickle holds a list of (frame, action, reward) tuples; the lists
    are concatenated into one.  Raises RuntimeError when the directory is
    missing.
    """
    if not os.path.exists(path):
        raise RuntimeError("raw data path not exist")
    merged = list()
    for pkl_path in glob.glob(os.path.join(path, '*.pkl')):
        with open(pkl_path, 'rb') as fh:
            merged.extend(pickle.load(fh))
    return merged
def preprocess_raw_data(history: List[Tuple['np.array', List[float], float]]) \
        -> ('np.array', 'np.array'):
    """Filter, normalize and stack raw play-data samples into arrays.

    For each (img, action, reward) record the boolean action vector is
    looked up in ACTION_LIST to obtain an integer class label; records whose
    action is not in ACTION_LIST are skipped.  Images are converted to
    float in [0, 1].

    :param history: a list of raw (frame, action, reward) records.
    :return: (images stacked along axis 0, int label array).
             NOTE: np.stack raises ValueError when every record is filtered out,
             same as the original behaviour.
    """
    imgs = list()
    labels = list()
    for h in history:
        img, label, _ = h
        # determine label: match the boolean action pattern against ACTION_LIST
        l_int = 0
        label = list(np.array(label, dtype=bool))
        try:
            l_int = ACTION_LIST.index(label)
        except ValueError:
            # for now we skip sample whose action is not in ACTION_LIST
            continue
        # skip non-action sample
        # if l_int == 0:
        #     continue
        # normalize img to [0, 1].  The `np.float`/`np.int` aliases used
        # originally were removed in NumPy 1.24 -- use the builtins instead
        # (identical dtypes: float64 / platform int).
        img = img.astype(float)
        img /= 255.0
        imgs.append(img)
        labels.append(l_int)
    return np.stack(imgs, axis=0), np.array(labels, dtype=int)
def test_data_preprocess():
    """Manual sanity check: load the raw dataset, preprocess it, show one frame."""
    his = load_raw_data(RAW_DATA_PATH)
    print('num of raw data samples: ', len(his))
    # samp_i = np.random.randint(0, len(his))
    # print(his[samp_i][0])
    # print(his[samp_i][1])
    # print(his[samp_i][2])
    # print(his[samp_i][0].shape)
    # im = plt.imshow(his[samp_i][0], cmap='gray')
    # plt.show()
    x_train, y_train = preprocess_raw_data(his)
    # images and labels must stay aligned one-to-one
    assert x_train.shape[0] == y_train.shape[0]
    print('x_train.shape: ', x_train.shape)
    print('y_train.shape: ', y_train.shape)
    # display a random preprocessed frame with its label
    samp_i = np.random.randint(0, x_train.shape[0])
    print('label of the displayed example: ', y_train[samp_i])
    im = plt.imshow(x_train[samp_i], cmap='gray')
    plt.show()
# Run the interactive sanity check when executed as a script.
if __name__ == '__main__':
    test_data_preprocess()
|
[
"zhouziqun@cool2645.com"
] |
zhouziqun@cool2645.com
|
cd0710aad9c6efbdf3cb3e9dcc4e8904d93f7c7a
|
0754e2e7aa1ffb90b54d563ce5a9317e41cfebf9
|
/Algorithm/Programmers/예선문제_0912_1.py
|
bf343580cadc687be1aa13eba4c93677632489b6
|
[] |
no_license
|
ChaeMyungSeock/Study
|
62dcf4b13696b1f483c816af576ea8883c57e531
|
6f726a6ecb43387e4a3b9d068a9c491b115c74c0
|
refs/heads/master
| 2023-01-24T20:59:52.053394
| 2020-12-07T14:54:34
| 2020-12-07T14:54:34
| 263,255,793
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
import re
def solution(new_id):
    """KAKAO new-ID normalisation (Programmers qualifier).

    NOTE(review): the regex '[^[a-z0-9-_.]' negates the set {[, a-z, 0-9,
    -, _, .} and therefore *keeps* '[' characters; the first while-loop
    then strips the surviving '[' by hand.  A debug print is still present.
    """
    new_id = new_id.lower()
    # step 1+2: lowercase, drop disallowed characters (except '[', see above)
    new_id = re.sub('[^[a-z0-9-_.]','',new_id)
    # manual removal of the '[' characters the regex let through
    c = 0
    while 1:
        if len(new_id) >=2 and new_id[c]=='[':
            new_id = new_id[:c] + new_id[c+1:]
            c -=1
        elif len(new_id) == 1 and new_id[c] == '[':
            new_id = ""
        if c == len(new_id)-1:
            break
        c +=1
    print(new_id)
    # step 3: collapse consecutive dots to a single dot
    b = 0
    while 1:
        if len(new_id)>=1 and b>=1 and new_id[b]=='.':
            if new_id[b-1] == '.':
                new_id = new_id[:b] + new_id[b+1:]
                b -=1
        if b == len(new_id)-1:
            break
        b +=1
    # step 4a: strip leading dots
    a=0
    while 1:
        if a == 0 and new_id[a]=='.':
            if len(new_id)>=2:
                new_id = new_id[1:]
                a = -1
            else:
                new_id = ""
                break
        if new_id[0] != '.' :
            break
        a += 1
    # step 4b: strip a trailing dot
    if len(new_id)>=2 and new_id[-1] == '.':
        new_id = new_id[:-1]
    elif len(new_id) == 1 and new_id[-1] == '.':
        new_id = ""
    # step 5: empty -> "a"; step 6: trim to 15 chars, dropping a trailing dot
    if len(new_id) == 0:
        new_id += "a"
    elif len(new_id) >=16:
        new_id = new_id[:15]
        if new_id[-1] == '.':
            new_id = new_id[:-1]
    # step 7: pad with the last character until length 3
    if len(new_id)<=2:
        while 1:
            new_id += new_id[-1]
            if new_id[-1] == '.':
                new_id = new_id[:-1]
            if len(new_id) == 3:
                break
    return new_id
# Ad-hoc check against the first sample; remaining cases left commented out.
new_id ="=+[{]}:?,<>/-_.~!@#$%^&*()=+[{]}:?,<>/"
print(solution(new_id))
# new_id ="z-+.^."
# print(solution(new_id))
# new_id ="=.="
# print(solution(new_id))
# new_id ="123_.def"
# print(solution(new_id))
# new_id ="abcdefghijklmn.p"
# print(solution(new_id))
|
[
"noreply@github.com"
] |
ChaeMyungSeock.noreply@github.com
|
24ea2a5d3090b4d31a336fddafb320974492ea58
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/ABC/abc151-abc200/abc154/a.py
|
89362b6b10c2cd6de4bd06bbc15544c91d7f434a
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
# -*- coding: utf-8 -*-
def main():
    """AtCoder ABC154 A: print the remaining ball counts after one draw.

    Input: two labels s t, their counts a b, and the drawn label u.
    The count matching u is decremented by one.
    """
    s, t = input().split()
    a, b = map(int, input().split())
    u = input()
    if u == s:
        a -= 1
    else:
        b -= 1
    print(a, b)


if __name__ == '__main__':
    main()
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
e523c17f61fc2ea477066fd4d887bc7191e705e5
|
d78309688232cf5f411af4eff6b466c8cd30846b
|
/xutils/examples/tushare_live_feed.py
|
c9ef42c14431ca68089a602f29c006cfe727850f
|
[
"Apache-2.0"
] |
permissive
|
huangzhangfeng/x-utils
|
5f8c1148789cf3dbbe6dff149562e3fd50c17c7a
|
291d92832ee0e0c89bc22e10ecf2f44445e0d300
|
refs/heads/master
| 2020-06-15T03:31:33.613290
| 2018-04-16T05:43:06
| 2018-04-16T05:43:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# -*- coding: utf-8 -*-
from xutils.bar_builder import (LiveFeed,
BarFrequency)
import tushare as ts
if __name__ == '__main__':
    # Minute-bar live feed for the zh500 index, quotes pulled via tushare.
    live_feed = LiveFeed(tickers=['zh500'],
                         frequency=BarFrequency.MINUTE,
                         live_quote_arg_func=ts.get_realtime_quotes)
    live_feed.start()
    # Poll until the feed reports end-of-feed, printing each completed bar.
    while not live_feed.eof():
        bars = live_feed.get_next_bar()
        if bars is not None:
            print(bars['zh500'].date_time, bars['zh500'].price)
|
[
"siqiao_xue@163.com"
] |
siqiao_xue@163.com
|
61a35ed86f7dcce431d0db4afc8fc306fcacbf12
|
c430b5b5b45f2fe94f7110fd56922bcf55c61269
|
/ixia/hlapi/4.98.122.39/library/common/ixiangpf/python/ixiangpf_commands/emulation_lacp_info.py
|
81a80b9fc2ad01c12e3b5d986294a671fe9f9876
|
[] |
no_license
|
rayjiang2013/RF
|
08189671398095d864d41ea5a3af1958e8eb6252
|
936d32629061c4685d8e18b5cf9f001255514ec1
|
refs/heads/master
| 2016-08-09T11:38:51.990559
| 2016-01-06T20:20:02
| 2016-01-06T20:20:02
| 47,857,352
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,096
|
py
|
# -*- coding: utf-8 -*-
import sys
from ixiaerror import IxiaError
from ixiangpf import IxiaNgpf
from ixiautil import PartialClass, make_hltapi_fail
class IxiaNgpf(PartialClass, IxiaNgpf):
    # Generated HLAPI wrapper: contributes one command to the partial IxiaNgpf class.
    def emulation_lacp_info(self, mode, **kwargs):
        r'''
        #Procedure Header
        Name:
            emulation_lacp_info

        Description:
            Retrieves information about the LACP protocol.
            The following operations are done:
            aggregate_stats learned_info clear_stats configuration

        Synopsis:
            emulation_lacp_info
                -mode          CHOICES aggregate_stats
                               CHOICES global_learned_info
                               CHOICES per_port
                               CHOICES per_device_group
                               CHOICES per_lag_statistics
                               CHOICES clear_stats
                               CHOICES configuration
                [-session_type CHOICES lacp staticLag
                               DEFAULT lacp]
                [-handle       ANY]
                [-port_handle  REGEXP ^[0-9]+/[0-9]+/[0-9]+$]

        Arguments:
            -mode
            -session_type
                The LACP to be emulated. CHOICES: lacp static_lag.
            -handle
            -port_handle

        Return Values:
            $::SUCCESS | $::FAILURE
            key:status  value:$::SUCCESS | $::FAILURE
            key:log     value:On status of failure, gives detailed information.

            Aggregate stats are returned under
            <port_handle>.aggregate.<stat>, where <stat> is one of:
            port_name, sessions_up, sessions_flap, sessions_not_started,
            sessions_down, link_state, lag_id, total_lag_member_ports,
            lag_member_ports_up, lacpdu_tx, lacpdu_rx, lacpu_malformed_rx,
            marker_pdu_tx, marker_pdu_rx, marker_res_pdu_tx,
            marker_res_pdu_rx, marker_res_timeout_count,
            lacpdu_tx_rate_violation_count,
            marker_pdu_tx_rate_violation_count.

            Learned-info keys include: lag_id, actor_system_id,
            actor_system_priority, actor_port_number, administrative_key,
            actor_operationalkey, actor_lacp_activity, actor_lacpdu_timeout,
            actor_aggregration_enabled, actor_synchronized_flag,
            actor_collecting_flag, actor_defaulted_flag, actor_expired_flag,
            link_aggregration_status, partner_system_id,
            partner_system_priority, partner_port_number,
            partner_port_priority, partner_operational_key,
            partner_lacp_activity, partner_lacpdu_timeout,
            partner_aggregration, partner_synchronized_flag,
            partner_collecting_flag, partner_distributing_flag,
            partner_defaulted_flag, partner_expired_flag,
            collectors_max_delay, other_lag_member_count, details.

        Examples:
        Sample Input:
        Sample Output:
        Notes:
        See Also:
        '''
        # Collect the named args plus **kwargs into one dict for the backend.
        hlpy_args = locals().copy()
        hlpy_args.update(kwargs)
        del hlpy_args['self']
        del hlpy_args['kwargs']
        not_implemented_params = []
        mandatory_params = []
        file_params = []
        try:
            return self.__execute_command(
                'emulation_lacp_info',
                not_implemented_params, mandatory_params, file_params,
                hlpy_args
            )
        except (IxiaError, ):
            # Python 2 idiom: fetch the active exception via sys.exc_info().
            # NOTE(review): e.message is Python-2-only -- would need str(e) on
            # Python 3; verify the target interpreter.
            e = sys.exc_info()[1]
            return make_hltapi_fail(e.message)
|
[
"jimhe@fortinet.com"
] |
jimhe@fortinet.com
|
bfd39877647de6747a51adede1939f0ff10c6d7c
|
871e1b0295c0fbbfca8191236d674866cf62ff01
|
/TrainEffNetB5_5FP_crossentropyloss_Block5_2FC_nonCrop_3.py
|
a4f1aba22a0059253ac6987c0239cd90c9ad026d
|
[] |
no_license
|
Peckkie/USAI_ABnormal_Screening
|
ce31a813e9303a7d43def912ab731cc633268cb7
|
82cd63ac9ab72fbe68eae254c15c7bf7ef906022
|
refs/heads/master
| 2023-02-16T13:32:33.678500
| 2021-01-07T02:36:35
| 2021-01-07T02:36:35
| 277,981,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,871
|
py
|
import PIL
from keras import models
from keras import layers
from tensorflow.keras import optimizers
import os
import glob
import shutil
import sys
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
import os
from tensorflow.keras import callbacks
import pandas as pd
# Pin training to GPU 1 and tolerate truncated JPEGs in the dataset.
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Training hyper-parameters.
batch_size = 16
epochs = 200
#Train
dataframe = pd.read_csv('/home/yupaporn/codes/USAI/traindf_fold1_3.csv')
base_dir = '/media/tohn/SSD/Images/Image1'
os.chdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
#validation
valframe = pd.read_csv( '/home/yupaporn/codes/USAI/validationdf_fold1_3.csv')
validation_dir = os.path.join(base_dir, 'validation')
#load model: round-1 EfficientNet-B5 checkpoint to be fine-tuned further.
import efficientnet.tfkeras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model
model_dir = '/media/tohn/SSD/ModelTrainByImages/R1_1/models/B5_R1_5FP_relu_2FC_nonCrop_3.h5'
model = load_model(model_dir)
# Model input is square; reuse its side length for the generators below.
height = width = model.input_shape[1]
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Training-time augmentation: geometric + brightness jitter; no horizontal
# flips (orientation matters for these ultrasound views).
train_datagen = ImageDataGenerator(
      rescale=1./255,
      rotation_range=30,
      width_shift_range=0.2,
      height_shift_range=0.2,
      brightness_range=[0.5,1.5],
      shear_range=0.4,
      zoom_range=0.2,
      horizontal_flip=False,
      fill_mode='nearest')
# Validation images are only rescaled.
test_datagen = ImageDataGenerator(rescale=1./255)
# Flow images listed in the CSVs; labels come from the 'Views' column.
train_generator = train_datagen.flow_from_dataframe(
        dataframe = dataframe,
        directory = train_dir,
        x_col = 'Path Full',
        y_col = 'Views',
        target_size = (height, width),
        batch_size=batch_size,
        color_mode= 'rgb',
        class_mode='categorical')
test_generator = test_datagen.flow_from_dataframe(
        dataframe = valframe,
        directory = validation_dir,
        x_col = 'Path Full',
        y_col = 'Views',
        target_size = (height, width),
        batch_size=batch_size,
        color_mode= 'rgb',
        class_mode='categorical')
# Round-2 output directory and TensorBoard log root.
os.chdir('/media/tohn/SSD/ModelTrainByImages/R2_1')
root_logdir = '/media/tohn/SSD/ModelTrainByImages/R2_1/my_logs_block52_5FP_1FC_nonCrop_3'
def get_run_logdir():
    # One timestamped subdirectory per training run.
    import time
    run_id = time.strftime("run_%Y_%m_%d_%H_%M_%S")
    return os.path.join(root_logdir,run_id)
run_logdir = get_run_logdir()
tensorboard_cb = callbacks.TensorBoard(log_dir = run_logdir)
# os.makedirs("./models_6", exist_ok=True)
def avoid_error(gen):
    """Yield (data, labels) batches from `gen`, dropping batches that raise.

    Keras image generators can fail on individual corrupt files; rather than
    aborting training, a failing batch is skipped and iteration continues.
    Unlike the original bare ``except: pass``, exhaustion of `gen` now ends
    the wrapper (the old version looped forever once StopIteration started
    being raised) and KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    while True:
        try:
            data, labels = next(gen)
        except StopIteration:
            return  # underlying generator exhausted -- stop cleanly
        except Exception:
            continue  # bad batch: skip it and keep going
        yield data, labels
#Unfreez: make everything from block5a_se_excite onwards trainable,
# keeping the earlier conv base frozen.
model.trainable = True
set_trainable = False
for layer in model.layers:
    if layer.name == 'block5a_se_excite':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
print('This is the number of trainable layers '
      'after freezing the conv base:', len(model.trainable_weights))
# Low learning rate for fine-tuning the unfrozen blocks.
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
run_logdir = get_run_logdir()
tensorboard_cb = callbacks.TensorBoard(run_logdir)
#early_stop_cb = callbacks.EarlyStopping(monitor='val_acc', patience=66, mode= 'max')
# Train with the error-tolerant wrappers around both generators.
history = model.fit_generator(
      avoid_error(train_generator),
      steps_per_epoch= len(dataframe)//batch_size,
      epochs=epochs,
      validation_data=avoid_error(test_generator),
      validation_steps= len(valframe) //batch_size,
      callbacks = [tensorboard_cb])
model.save('./models/B5_R2b5_5FP_relu_2FC_nonCrop_3.h5')
|
[
"w_yupaporn@kkumail.com"
] |
w_yupaporn@kkumail.com
|
51c6159b731d8e3312ebfcff8878205082045ed9
|
f921ba30c773da9772293f69aa88c87b23929cc6
|
/src/main.py
|
80b5bb6dcf36d3ae1797299e1f62a20284465be6
|
[
"MIT"
] |
permissive
|
R-Mielamud/Telegram_BooksDelivery
|
56349673b0bdb87204c35d4bce2cdb01d6d18722
|
0745e60a4541f38fba8ac378185aff558ec95147
|
refs/heads/master
| 2023-03-01T03:26:42.038174
| 2021-02-11T17:52:52
| 2021-02-11T17:52:52
| 326,045,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,056
|
py
|
from telebot import TeleBot as Bot
from helpers.conversation import ConversationsStorage, Conversation
from helpers.messaging import parse_manifest, send_until_question
from api import UsersAPI, OrdersAPI, RequisitesAPI, BillsAPI
from constants import BOT_TOKEN
# Wire up the bot, the conversation manifest and the backend API clients.
bot = Bot(BOT_TOKEN)
welcome, manifest = parse_manifest()
conversations = ConversationsStorage()
users = UsersAPI()
orders = OrdersAPI()
requisites = RequisitesAPI()
bills = BillsAPI()
@bot.message_handler(commands=["start"])
def on_start(command):
    """Handle /start: ensure a user record exists, reset a stored phone, greet."""
    messenger_id = command.from_user.id
    existing = users.get_by_messenger_id(messenger_id)
    if not existing:
        users.create(messenger_id=messenger_id, messenger="Telegram")
    elif existing.phone:
        # Restarting drops the saved phone so it will be asked for again.
        users.partial_update(existing.id, phone=None)
    bot.send_message(command.chat.id, welcome)
@bot.message_handler(content_types=["text"])
def on_message(message):
    # Drive one step of the scripted conversation for this sender.
    uid = message.from_user.id
    prev_answer = message.text
    user = users.get_by_messenger_id(uid)
    if not user:
        # First contact without /start: the first message doubles as the phone.
        user = users.create(messenger_id=uid, phone=prev_answer, messenger="Telegram")
        prev_answer = None
    elif not user.phone:
        users.partial_update(user.id, phone=prev_answer)
        prev_answer = None
    send = lambda text: bot.send_message(message.chat.id, text)
    # Resume (or start) the conversation from the answers stored on the user.
    if not conversations.exists(uid):
        conversations.add(uid, manifest, default_answers=user.convers_answers_data)
    conversation = conversations.get(uid)
    conversation, question = send_until_question(send, conversation, prev_answer)
    if conversation.answers.stopped:
        # User aborted: wipe stored answers and restart from the manifest.
        users.partial_update(user.id, convers_answers_data={})
        conversation, _ = send_until_question(send, Conversation(manifest, default_answers={}), None)
    elif not question:
        # Conversation finished: dispatch on the chosen action.
        update_data = {"convers_answers_data": {}}
        action = conversation.answers.get("action")
        if action == "order":
            orders.create(
                books=conversation.answers.get("books"),
                user=user.id
            )
        elif action == "requisites":
            result = requisites.create(
                delivery_name=conversation.answers.get("delivery_name"),
                delivery_phone=conversation.answers.get("delivery_phone"),
                delivery_address=conversation.answers.get("delivery_address"),
                post_service=conversation.answers.get("post_service")
            )
            update_data["requisites"] = result.id
        elif action == "bill":
            bills.create(
                amount=conversation.answers.get("amount"),
                comment=conversation.answers.get("comment"),
                user=user.id
            )
            # NOTE(review): these two lines sit inside the "bill" branch only,
            # so the conversation restart and the user update never run for
            # "order"/"requisites" -- verify whether they should be dedented
            # to run after every completed action.
            conversation, _ = send_until_question(send, Conversation(manifest, default_answers={}), None)
            users.partial_update(user.id, **update_data)
    elif not question.skip:
        # Persist partial answers so the flow survives restarts.
        users.partial_update(user.id, convers_answers_data=conversation.answers.data)
    conversations.set(uid, conversation)
# Start long-polling for updates when run as a script.
if __name__ == "__main__":
    print("Bot started!")
    bot.polling()
|
[
"2m.roman2@gmail.com"
] |
2m.roman2@gmail.com
|
47ba284a792b5f983cbaf547bb02cc9069dde73b
|
4195cfc592b8c6ee42c5ea7b7d7d08b9899dd7c0
|
/spektral/datasets/qm7.py
|
0b226669c663f48b60d05bc6e3885431fd6d6681
|
[
"MIT"
] |
permissive
|
mbrukman/spektral
|
4f5cc708a2996469ebbf2b6133acca42c6a869bc
|
d720de476d04a8d9ed23570336eddfedb97dd7de
|
refs/heads/master
| 2023-02-03T09:16:32.579795
| 2020-12-15T18:00:24
| 2020-12-15T18:00:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
import os.path as osp
import numpy as np
import scipy.sparse as sp
from scipy.io import loadmat
from tensorflow.keras.utils import get_file
from spektral.data import Dataset, Graph
class QM7(Dataset):
    """
    The QM7b dataset of molecules from the paper:

    > [MoleculeNet: A Benchmark for Molecular Machine Learning](https://arxiv.org/abs/1703.00564)<br>
    > Zhenqin Wu et al.

    The dataset has no node features.
    Edges and edge features are obtained from the Coulomb matrices of the
    molecules.

    Each graph has a 14-dimensional label for regression.
    """
    url = 'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/qm7b.mat'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def download(self):
        # Fetch and extract qm7b.mat into this dataset's cache directory.
        get_file('qm7b.mat', self.url, extract=True, cache_dir=self.path,
                 cache_subdir=self.path)

    def read(self):
        """Build one Graph per molecule from the downloaded .mat file."""
        print('Loading QM7 dataset.')
        mat_file = osp.join(self.path, 'qm7b.mat')
        data = loadmat(mat_file)
        coulomb_matrices = data['X']
        labels = data['T']

        output = []
        for i in range(len(coulomb_matrices)):
            # NOTE: `data` is deliberately rebound here to the nonzero values
            # of the Coulomb matrix (sparse triplets).
            row, col, data = sp.find(coulomb_matrices[i])
            # Binary adjacency from the nonzero pattern; the Coulomb entries
            # themselves become one-dimensional edge features.
            a = sp.csr_matrix((np.ones_like(data), (row, col)))
            e = data[:, None]
            y = labels[i]
            output.append(Graph(a=a, e=e, y=y))
        return output
|
[
"daniele.grattarola@gmail.com"
] |
daniele.grattarola@gmail.com
|
3ec8a31d0882655804f8b5f2cb27daca6abfd5e7
|
2e8f0de7a1526ef511927783235edc93f7c90036
|
/communicare/core/migrations/0043_event_external_subscriptions.py
|
f59f64eaec3ff4408f9560c7e05cd7fcea47da6d
|
[] |
no_license
|
ConTTudOweb/CommunicareProject
|
3d663578dfdeb455bc49419b3d103daec69c8fab
|
211a1124c8c4549c609832ad71069a55c714a430
|
refs/heads/master
| 2022-12-21T12:59:35.424560
| 2021-05-10T22:16:15
| 2021-05-10T22:16:15
| 163,891,380
| 0
| 1
| null | 2022-12-08T07:43:22
| 2019-01-02T21:27:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
# Generated by Django 2.1.8 on 2020-03-02 17:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional external-subscription URL
    # ("inscrição externa") to the Event model.
    dependencies = [
        ('core', '0042_auto_20191104_0945'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='external_subscriptions',
            field=models.URLField(blank=True, null=True, verbose_name='inscrição externa'),
        ),
    ]
|
[
"sandrofolk@hotmail.com"
] |
sandrofolk@hotmail.com
|
2ac85d35e477f5d3e3fe61212bc2ef57463c03a6
|
637ab3853b560485d1b3c3ecbb469ff48114f3fb
|
/RepublicHyundai/taxapp/admin.py
|
66f846da14758850dcc5f7d4b76283e1f2e96d62
|
[] |
no_license
|
vishalrathodgithub/republic_hyundai
|
19b7af4f84cec5cec037f2ed3a77ec5dfd4be8b9
|
cb1241f7c696a5b067a6b1ad1ce27dd371b41e2e
|
refs/heads/master
| 2020-11-24T13:09:21.491228
| 2019-12-15T09:29:16
| 2019-12-15T09:29:16
| 228,159,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
from django.contrib import admin
from taxapp.models import *
# Register your models here.
class TaxMasterAdmin(admin.ModelAdmin):
    # Columns shown in the TaxMaster change-list view.
    list_display =['tax_product_category','tax_hsn','tax_sgst','tax_cgst','tax_igst']
admin.site.register(TaxMaster,TaxMasterAdmin)
admin.site.register(FinancialYear)
|
[
"you@example.com"
] |
you@example.com
|
8dc37cc18c0c38e4156e6ad424ef221774f15007
|
5a394c53a7099bc871401e32cf3fc782546f9f7d
|
/.history/lab1/Graph_20210130222041.py
|
a941a151506209424208f45209345b15f8b3979d
|
[] |
no_license
|
ajaygc95/advPy
|
fe32d67ee7910a1421d759c4f07e183cb7ba295b
|
87d38a24ef02bcfe0f050840179c6206a61384bd
|
refs/heads/master
| 2023-03-27T10:10:25.668371
| 2021-03-23T08:28:44
| 2021-03-23T08:28:44
| 334,614,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
from matplotlib import pyplot as plt
import numpy as np
import sklearn as sk
class Graph:
    """Thin wrapper around matplotlib.pyplot for temperature-change plots.

    NOTE(review): the original snippet (an editor-history snapshot) ended
    mid-statement with a dangling ``plt.`` in plotlinear, which is a
    SyntaxError; the method is completed with ``plt.show()`` below --
    confirm the intended final call.
    """

    def __init__(self) -> None:
        # Keep a handle to pyplot so callers can reach it via the instance.
        self.plot = plt

    def plotGraph(self, x, y):
        """Plot temperature change per year with axis labels, title, legend."""
        plt.plot(x, y)
        plt.xlabel('Year')
        plt.ylabel('Change in Temperature')
        plt.title('Temperature change by year')
        plt.legend('Change')
        plt.show()

    def plotlinear(self, x, y):
        """Plot x against y and display the figure."""
        plt.plot(x, y)
        plt.show()
|
[
"gcajay95@gmail.com"
] |
gcajay95@gmail.com
|
e8c4bb6b0a28096de4b86e45d42f7d4cf7c240ff
|
e837db39c9609830ab8e77dac2077ea30cadc5b3
|
/core/migrations/0003_auto_20190915_1905.py
|
368d065348ec67a18de5829c24f80f2c257f1185
|
[] |
no_license
|
windundschnee/accountneu
|
9c8ff1507f725a5179604be2640d76b5302a0299
|
da9066840a312a95bc628556c94738010787a01f
|
refs/heads/master
| 2022-12-10T06:00:42.449898
| 2019-10-25T18:29:23
| 2019-10-25T18:29:23
| 211,513,631
| 0
| 0
| null | 2022-12-08T05:22:15
| 2019-09-28T14:34:00
|
Python
|
UTF-8
|
Python
| false
| false
| 868
|
py
|
# Generated by Django 2.2 on 2019-09-15 17:05
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20190915_1758'),
]
operations = [
migrations.AddField(
model_name='allgeingaben',
name='schneelast',
field=models.DecimalField(decimal_places=2, default=2, max_digits=5, validators=[django.core.validators.MinValueValidator(0)]),
),
migrations.AddField(
model_name='allgeingaben',
name='schneelast_benutzerdefiniert',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='allgeingaben',
name='schneelastzone',
field=models.CharField(default='2', max_length=10),
),
]
|
[
"du@example.com"
] |
du@example.com
|
9c1dec43e89521c2bace3fda5c4a36ee10c09131
|
320280bfce76713436b76ffc3125ccf37e65a324
|
/AnalyzeMiniPlusSubstructure/test/ttbar/ttbar_82.py
|
439df1a9e7ad24d35f3968ac9303d4353832a0cf
|
[] |
no_license
|
skhalil/MiniValidation
|
75ea5c0d7cde17bf99c7d31501f8384560ee7b99
|
1a7fb8377e29172483ea6d3c7b3e427ff87e7e37
|
refs/heads/master
| 2016-09-05T10:31:38.562365
| 2015-01-29T05:30:32
| 2015-01-29T05:30:32
| 29,898,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,860
|
py
|
import FWCore.ParameterSet.Config as cms
###############################################
useMiniAOD = True
# AOD
pfcandidates = 'particleFlow'
chsstring = 'pfNoPileUpJME'
genjetparticles = 'genParticles'
importantgenparticles = 'genParticles'
tracks = 'generalTracks'
vertices = 'offlinePrimaryVertices'
mergedvertices = 'inclusiveMergedVertices'
mergedvertices2 = ''
primaryvertices = 'offlinePrimaryVertices'
#miniAOD
if useMiniAOD:
pfcandidates = 'packedPFCandidates'
genjetparticles = 'packedGenParticles'
importantgenparticles = 'prunedGenParticles'
tracks = 'unpackedTracksAndVertices'
vertices = 'unpackedTracksAndVertices'
mergedvertices = 'unpackedTracksAndVertices'
mergedvertices2 = 'secondary'
primaryvertices = 'offlineSlimmedPrimaryVertices'
print 'useMiniAOD = '+str(useMiniAOD)
print ' pfcandidates = '+pfcandidates
print ' genjetparticles = '+genjetparticles
print ' importantgenparticles = '+importantgenparticles
print ' tracks = '+tracks
print ' vertices = '+vertices
print ' mergedvertices = '+mergedvertices
print ' mergedvertices2 = '+mergedvertices2
print ' primaryvertices = '+primaryvertices
###############################################
# SETUP
process = cms.Process("USER")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) , allowUnscheduled = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.MessageLogger.cerr.FwkJob.limit=1
process.MessageLogger.cerr.ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) )
###############################################
# SOURCE
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'root://cmsxrootd-site.fnal.gov//store/mc/Phys14DR/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/MINIAODSIM/PU20bx25_PHYS14_25_V1-v1/00000/4881873E-BE76-E411-BDE9-0025901D4764.root'
)
)
###############################################
# ANA
process.demo = cms.EDAnalyzer("AnalyzeMiniPlusSubstructure",
vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
muons = cms.InputTag("slimmedMuons"),
electrons = cms.InputTag("slimmedElectrons"),
taus = cms.InputTag("slimmedTaus"),
photons = cms.InputTag("slimmedPhotons"),
jets = cms.InputTag("slimmedJets"),
fatjets = cms.InputTag("slimmedJetsAK8"),
mets = cms.InputTag("slimmedMETs"),
pfCands = cms.InputTag("packedPFCandidates"),
packed = cms.InputTag("packedGenParticles"),
pruned = cms.InputTag("prunedGenParticles"),
bits = cms.InputTag("TriggerResults","","HLT"),
prescales = cms.InputTag("patTrigger")
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("ttbar82.root"),
closeFileFast = cms.untracked.bool(True)
)
###############################################
# RECO AND GEN SETUP
process.load('PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.Geometry_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag ='PHYS14_25_V2'
#'START70_V6::All'
#'START70_V6::All'
process.load('RecoJets.Configuration.RecoPFJets_cff')
process.load('RecoJets.Configuration.RecoGenJets_cff')
#process.fixedGridRhoFastjetAll.pfCandidatesTag = pfcandidates
process.fixedGridRhoFastjetAll.pfCandidatesTag = 'packedPFCandidates'
process.fixedGridRhoAll.pfCandidatesTag = 'packedPFCandidates'
# process.fixedGridRhoAll.pfCandidatesTag = .InputTag("packedPFCandidates")
# process.fixedGridRhoFastjetAll = fixedGridRhoFastjetAll.clone( pfCandidatesTag = cms.InputTag("packedPFCandidates"))
# process.fixedGridRhoAll = fixedGridRhoAll.clone( pfCandidatesTag = cms.InputTag("packedPFCandidates"))
from RecoJets.JetProducers.SubJetParameters_cfi import SubJetParameters
from RecoJets.JetProducers.PFJetParameters_cfi import *
from RecoJets.JetProducers.CaloJetParameters_cfi import *
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
from RecoJets.JetProducers.CATopJetParameters_cfi import *
from RecoJets.JetProducers.GenJetParameters_cfi import *
from RecoJets.JetProducers.caTopTaggers_cff import *
###############################################
process.content = cms.EDAnalyzer("EventContentAnalyzer")
process.p = cms.Path(
#process.fixedGridRhoFastjetAll
process.demo
)
|
[
"skhalil@fnal.gov"
] |
skhalil@fnal.gov
|
ef32a3e70644ccee481a6bb2836324e2d3e9e4bf
|
b7f45072d056b80ed49e6bcde91877d8576e970d
|
/SQL/inc/py/all-names.py
|
b16948d631042a84c093b9e28cfe892992ea0f3f
|
[] |
no_license
|
jrminter/tips
|
128a18ee55655a13085c174d532c77bcea412754
|
f48f8b202f8bf9e36cb6d487a23208371c79718e
|
refs/heads/master
| 2022-06-14T08:46:28.972743
| 2022-05-30T19:29:28
| 2022-05-30T19:29:28
| 11,463,325
| 5
| 8
| null | 2019-12-18T16:24:02
| 2013-07-17T00:16:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
from sqlite3 import dbapi2 as sqlite
connection = sqlite.connect("experiments.db")
cursor = connection.cursor()
cursor.execute("SELECT FirstName, Lastname FROM Person ORDER BY LastName;")
results = cursor.fetchall();
for r in results:
print(r[0], r[1])
cursor.close();
connection.close();
|
[
"jrminter@gmail.com"
] |
jrminter@gmail.com
|
b3068976266831d415316817e2e3f7e468c472bc
|
e85b47c61947b13afd2d8855acff6c3fd0acd3e5
|
/accounts/models.py
|
2e83c57c385b45f7da7d1acf0d87954f0e2f8239
|
[] |
no_license
|
Zhoroev/django_exam5
|
e2c2c5165d1dc5eeff87708de47267ec35135034
|
af5646b16732f8dc5bf0068ce34467cb29d70541
|
refs/heads/main
| 2023-06-30T16:49:25.077947
| 2021-08-07T17:33:19
| 2021-08-07T17:33:19
| 393,749,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
class TypeUserChoice(models.TextChoices):
ADMIN = 'admin'
SPECIALIST = 'specialist'
PERSONAL_CABINET = 'personal_cabinet'
type_user = models.CharField(max_length=120,
choices=TypeUserChoice.choices,
default=TypeUserChoice.PERSONAL_CABINET)
|
[
"erjan.joroev.kg@gmail.com"
] |
erjan.joroev.kg@gmail.com
|
c20262a0f63933cce6ec99765eb0929fd4f684d8
|
ed21823488a1cca51009793efa0b124e40d224a4
|
/neurobioseg/161207_multiple_training_segmentations/p161207_03_compute_paths.py
|
c8d1a821dce33c10b72ca127676a412318bedebd
|
[] |
no_license
|
jhennies/py_devel
|
4a41e13ec8cd9b834c3d5acf64becc0fa8ffc479
|
9fc860be95ae91064a40f25e26d4024fbae6eb1f
|
refs/heads/master
| 2021-01-16T23:25:56.716283
| 2017-03-10T17:49:55
| 2017-03-10T17:49:55
| 45,381,183
| 1
| 0
| null | 2017-03-10T17:49:56
| 2015-11-02T08:21:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,955
|
py
|
import os
import inspect
from hdf5_image_processing import Hdf5ImageProcessing as IP, Hdf5ImageProcessingLib as ipl
from hdf5_processing import RecursiveDict as rdict
from shutil import copy, copyfile
import numpy as np
import matplotlib.pyplot as plt
import processing_libip as libip
import sys
from yaml_parameters import YamlParams
__author__ = 'jhennies'
def load_images(filepath, skeys=None, recursive_search=False, logger=None):
if logger is not None:
logger.logging('Loading data from \n{}', filepath)
else:
print 'Loading data from \n{}'.format(filepath)
data = ipl()
data.data_from_file(
filepath=filepath,
skeys=skeys,
recursive_search=recursive_search,
nodata=True
)
return data
def compute_paths(yparams):
params = yparams.get_params()
thisparams = rdict(params['compute_paths'])
data = ipl()
for sourcekey, source in thisparams['sources'].iteritems():
# Load the necessary images
# 1. Determine the settings for fetching the data
try:
recursive_search = False
recursive_search = thisparams['skwargs', 'default', 'recursive_search']
recursive_search = thisparams['skwargs', sourcekey, 'recursive_search']
except KeyError:
pass
if len(source) > 2:
skeys = source[2]
else:
skeys = None
# 2. Load the data
yparams.logging('skeys = {}', skeys)
yparams.logging('recursive_search = {}', recursive_search)
data[sourcekey] = load_images(
params[source[0]] + params[source[1]], skeys=skeys, recursive_search=recursive_search,
logger=yparams
)
data['contacts'].reduce_from_leafs(iterate=True)
data['disttransf'].reduce_from_leafs(iterate=True)
# Set targetfile
targetfile = params[thisparams['target'][0]] \
+ params[thisparams['target'][1]]
yparams.logging('\nInitial datastructure: \n\n{}', data.datastructure2string(maxdepth=3))
for d, k, v, kl in data['segmentation'].data_iterator(yield_short_kl=True, leaves_only=True):
yparams.logging('===============================\nWorking on image: {}', kl + [k])
# # TODO: Implement copy full logger
# data[kl].set_logger(data.get_logger())
# prepare the dict for the path computation
indata = ipl()
indata['segmentation'] = np.array(data['segmentation'][kl][k])
indata['contacts'] = np.array(data['contacts'][kl][k])
indata['groundtruth'] = np.array(data['groundtruth'][kl][params['gtruthname']])
indata['disttransf'] = np.array(data['disttransf'][kl][k])
yparams.logging('Input datastructure: \n\n{}', indata.datastructure2string())
# Compute the paths sorted into their respective class
paths = ipl()
paths[kl + [k]] = libip.compute_paths_with_class(
indata, 'segmentation', 'contacts', 'disttransf', 'groundtruth',
thisparams,
ignore=thisparams['ignorelabels'],
max_end_count=thisparams['max_end_count'],
max_end_count_seed=thisparams['max_end_count_seed'],
debug=params['debug']
)
# Write the result to file
paths.write(filepath=targetfile)
def run_compute_paths(yamlfile, logging=True):
yparams = YamlParams(filename=yamlfile)
params = yparams.get_params()
# Logger stuff
yparams.set_indent(1)
yparams.startlogger(
filename=params['resultfolder'] + 'compute_paths.log',
type='w', name='ComputePaths'
)
try:
compute_paths(yparams)
yparams.logging('')
yparams.stoplogger()
except:
yparams.errout('Unexpected error')
if __name__ == '__main__':
yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters_ref.yml'
run_compute_paths(yamlfile, logging=False)
|
[
"julianhennies@hotmail.de"
] |
julianhennies@hotmail.de
|
0d4208c30ec0aab0cdd5a4405a79fedd5cf74c17
|
ffe555768c86e03e8528e1d77a3b763ef8decea1
|
/pygcn/smtest.py
|
91378e0c2848cc9a91f55cfdda709108851373b0
|
[] |
no_license
|
LittleQili/Toy-GCN
|
f4ba7dfa94468b44ef4687262d625678eea25007
|
2eb4ed0453b6acdff543aed7e965c80d424abc50
|
refs/heads/main
| 2023-03-25T17:41:04.459508
| 2020-12-27T07:40:02
| 2020-12-27T07:40:02
| 324,107,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils as tutil
# from utils import accuracy
from smmodel import GCN
from smdata import load_test_data
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
id,adj_smiles,feature_smiles,allinput = load_test_data()
model = torch.load('weight/yijiaGCN1.pt')
model.eval()
if args.cuda:
model.cuda()
feature_smiles = feature_smiles.cuda()
adj_smiles = adj_smiles.cuda()
finalact = torch.nn.Sigmoid()
f = open('output_518030910146_1.txt','w')
f.write('Chemical,Label\n')
output = finalact(model(adj_smiles,feature_smiles))
for i in range(adj_smiles.shape[0]):
tmpf = output[i].item()
f.write(id[i] + ',%f\n' % tmpf)
f.close()
|
[
"you@example.com"
] |
you@example.com
|
67e65a797c551dbb3fbd1cc8e37359c580ca3a81
|
46225b4cd6234b3aeb31b0e88f85df44fddd3d53
|
/common_crawlers/common_crawlers/spiders/job_bole3.py
|
ed2a90fc2506289e2de9890670e8270d0ed127db
|
[] |
no_license
|
git-wsf/crawler_project
|
cdbd8aaa0e4f232ffb07cdc5d4db90fc858e1d40
|
665945214036da1d312d16de83b13430d2e529c6
|
refs/heads/master
| 2020-07-27T15:14:16.152390
| 2018-10-30T13:46:34
| 2018-10-30T13:46:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from common_crawlers.utils.common import get_md5
from common_crawlers.items import JobBoleItem, CustomItemLoader
from urllib import parse
class JobBole3Spider(scrapy.Spider):
name = 'job_bole3'
allowed_domains = ['jobbole.com']
start_urls = ['http://blog.jobbole.com/all-posts/page/166/']
def parse(self, response):
all_links = response.xpath('//div[@id="archive"]/div/div[@class="post-thumb"]/a')
if all_links:
for each_link in all_links:
each_url = each_link.xpath('@href')
img_url = each_link.xpath('img/@src')
if img_url:
thumbnail_url = img_url.extract()[0]
else:
thumbnail_url = ""
yield Request(parse.urljoin(response.url, each_url.extract()[0]),
callback=self.parse_detail, meta={'thumbnail_url': thumbnail_url})
# next_page = response.xpath('//a[@class="next page-numbers"]/@href').extract_first()
# self.logger.info('下一页的链接是:{}'.format(next_page))
# if next_page:
# yield Request(next_page, callback=self.parse)
def parse_detail(self, response):
"""
使用xpath方法
获取文章页面的标题、发布时间、内容、点赞数、评论数、文章标签等
"""
self.logger.info('正在抓取的url是:{0}'.format(response.url))
l = CustomItemLoader(item=JobBoleItem(), response=response)
l.add_xpath('title', '//div[@class="entry-header"]/h1/text()')
l.add_value('thumbnail_url', response.meta['thumbnail_url'])
l.add_value('article_url', response.url)
l.add_value('article_url_id', get_md5(response.url))
l.add_xpath('create_time', '//p[@class="entry-meta-hide-on-mobile"]/text()')
# l.add_xpath('content', '//div[@class="entry"]')
l.add_xpath('like_num', '//h10[contains(@id,"votetotal")]/text()')
l.add_xpath('comment_num', '//a[@href="#article-comment"]/span/text()')
l.add_xpath('tags', '//p[@class="entry-meta-hide-on-mobile"]/a[not(contains(text(),"评论"))]/text()')
return l.load_item()
|
[
"982698913@qq.com"
] |
982698913@qq.com
|
7d0ebc005536be7a6d2ce1733d115bea7b53644b
|
0d0263ab9a24fc1bea785bcd913a8a113c571444
|
/03.02.object_detection_predict/SSD_detect.py
|
4df5bb9df70b4346ead58a4d2a3f46116c1d060b
|
[] |
no_license
|
chenbobaoge/BeginnerDL
|
3b4f393802c230a7a9ed60828af96573fb340282
|
e038334880ee521fcf2d92953b530a5518346c35
|
refs/heads/master
| 2021-04-28T17:15:48.345072
| 2018-01-20T06:41:36
| 2018-01-20T06:41:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,693
|
py
|
from keras.preprocessing import image
from scipy.misc import imread
from keras.applications.imagenet_utils import preprocess_input
import numpy as np
from keras.models import load_model
from keras_layers.SSD import PriorBox,Normalize, BBoxUtility
import pickle
import matplotlib.pyplot as plt
from keras_config.SSDConfig import SSDConfig
cfg = SSDConfig()
custom_objects = {
'PriorBox' : PriorBox,
'Normalize' : Normalize,
'BBoxUtility' : BBoxUtility
}
model = load_model('../weights/ssd.h5', custom_objects=custom_objects)
files = ('fish-bike.jpg', 'cat.jpg', 'boys.jpg', 'car_cat.jpg', 'car_cat2.jpg')
inputs = []
images = []
for f in files:
f = "./pics/{}".format(f)
img = image.load_img(""+f, target_size=(300, 300))
img = image.img_to_array(img)
images.append(imread(f))
inputs.append(img.copy())
inputs = preprocess_input(np.array(inputs))
preds = model.predict(inputs, batch_size=1, verbose=1)
# preds.shape (5, 7308, 33)
priors = pickle.load(open('./SSD300/prior_boxes_ssd300.pkl', 'rb'))
bbox_util = BBoxUtility(cfg.NUM_CLASS, priors)
results = bbox_util.detection_out(preds)
# type(results): list, len(results): 5, len(result[0]): 200, results[0].shape: (200, 6)
for i, img in enumerate(images):
# Parse the outputs.
det_label = results[i][:, 0]
det_conf = results[i][:, 1]
det_xmin = results[i][:, 2]
det_ymin = results[i][:, 3]
det_xmax = results[i][:, 4]
det_ymax = results[i][:, 5]
# Get detections with confidence higher than 0.6.
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.5]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(img / 255.)
currentAxis = plt.gca()
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * img.shape[1]))
ymin = int(round(top_ymin[i] * img.shape[0]))
xmax = int(round(top_xmax[i] * img.shape[1]))
ymax = int(round(top_ymax[i] * img.shape[0]))
score = top_conf[i]
label = int(top_label_indices[i])
label_name = cfg.VOC_CLASSES[label - 1]
display_txt = '{:0.2f}, {}'.format(score, label_name)
coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
color = colors[label]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
plt.show()
|
[
"jeffcobile@gmail.com"
] |
jeffcobile@gmail.com
|
85e4240770dd910e3b1334465f3b688842fb6c4e
|
8fa9174a6136c3f27c9090c2528e8d69835656a2
|
/app/mutant/tests/test_mutant_viewsets.py
|
dace4ccaf1ab5573dfd3bb347b3d654029504e2f
|
[] |
no_license
|
hugoseabra/xmen-magneto-ambition
|
61336a8ce0eec7425f747c548dba87cb1bf8e88e
|
36caec2ae9102fc31ec502fe23d080b4625325b4
|
refs/heads/master
| 2023-07-08T05:57:36.007412
| 2021-08-18T00:36:47
| 2021-08-18T00:36:47
| 397,329,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
from django.test import TestCase
from django.urls import reverse
class MutantEndpointTests(TestCase):
def _get_url(self):
return reverse('mutant:mutant-list')
def test_validation_of_how_many_items(self):
"""Tests error when less than 6 sequences is sent """
data = {
'dna': [
"ATGCGA",
"CAGTGC",
"TTATGT",
"AGAAGG",
"CCCCTA"
]
}
response = self.client.post(self._get_url(), data=data)
self.assertContains(response, 'DNA is not valid', status_code=400)
data = {
'dna': [
"ATGCGA",
"CABTGC", # <-- WRONG char
"TTATGT",
"AGAAGG",
"CCCCTA",
"TCACTG",
]
}
response = self.client.post(self._get_url(), data=data)
self.assertContains(
response,
'You must provide correct amino acid values with 6 digits:'
' A, C, G, T',
status_code=400
)
def test_post_mutant_check(self):
"""Tests whether a sequence of amino acids of a DNA is mutant """
data = {
'dna': [
"TTATTT",
"CAGTGC",
"TTATTT",
"TTATTT",
"GCGTCA",
"TTATTT",
]
}
response = self.client.post(self._get_url(), data=data)
self.assertContains(response, 'DNA is not mutant', status_code=403)
data = {
'dna': [
"ATGCGA",
"CAGTGC",
"TTATGT",
"AGAAGG",
"CCCCTA",
"TCACTG"
]
}
response = self.client.post(self._get_url(), data=data)
self.assertContains(response, 'DNA is mutant', status_code=200)
|
[
"hugoseabra19@gmail.com"
] |
hugoseabra19@gmail.com
|
dcd2341b9a1ca25f9d958de48d6f57195b81b110
|
114372880a520f30f2d07b1b13a146f96454fd2e
|
/backend/channel_plugin/channel_plugin/contrib/sites/migrations/0003_set_site_domain_and_name.py
|
ff7bbcef9c5f692431794448bd76e7cd3743344e
|
[
"MIT"
] |
permissive
|
wenotch/zc_plugin_channels
|
0ddb92064e6013a3f65d6ee65c526e0e605df25a
|
4e88aa4a3a1a140848c22a0fd90c6486560c3deb
|
refs/heads/main
| 2023-07-19T04:24:40.479183
| 2021-09-03T14:31:28
| 2021-09-03T14:31:28
| 401,643,447
| 1
| 0
| null | 2021-08-31T09:20:54
| 2021-08-31T09:20:53
| null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "Channel Plugin",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
|
[
"charlesboy49@gmail.com"
] |
charlesboy49@gmail.com
|
e237d04b87dc293db59d1d7946558444d2c591d0
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/know_first_place_about_other_year/think_fact.py
|
5ccb9491f21faeaab2c0396088561a505e242764
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
#! /usr/bin/env python
def world(str_arg):
part_or_different_eye(str_arg)
print('early_work')
def part_or_different_eye(str_arg):
print(str_arg)
if __name__ == '__main__':
world('new_place_and_way')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
5640887551d1f6accdd6e95361bd131529b00b45
|
3899dd3debab668ef0c4b91c12127e714bdf3d6d
|
/venv/Lib/site-packages/tensorflow/_api/v1/keras/initializers/__init__.py
|
aeaecd1674940e94d41121da513763e784bc99ac
|
[] |
no_license
|
SphericalPotatoInVacuum/CNNDDDD
|
b2f79521581a15d522d8bb52f81b731a3c6a4db4
|
03c5c0e7cb922f53f31025b7dd78287a19392824
|
refs/heads/master
| 2020-04-21T16:10:25.909319
| 2019-02-08T06:04:42
| 2019-02-08T06:04:42
| 169,691,960
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,855
|
py
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Keras initializer serialization / deserialization.
"""
from __future__ import print_function
from tensorflow.python import Constant
from tensorflow.python import Constant as constant
from tensorflow.python import GlorotNormal as glorot_normal
from tensorflow.python import GlorotUniform as glorot_uniform
from tensorflow.python import Identity
from tensorflow.python import Identity as identity
from tensorflow.python import Initializer
from tensorflow.python import Ones
from tensorflow.python import Ones as ones
from tensorflow.python import Orthogonal
from tensorflow.python import Orthogonal as orthogonal
from tensorflow.python import VarianceScaling
from tensorflow.python import Zeros
from tensorflow.python import Zeros as zeros
from tensorflow.python import he_normal
from tensorflow.python import he_uniform
from tensorflow.python import lecun_normal
from tensorflow.python import lecun_uniform
from tensorflow.python.keras.initializers import RandomNormal
from tensorflow.python.keras.initializers import RandomNormal as normal
from tensorflow.python.keras.initializers import RandomNormal as random_normal
from tensorflow.python.keras.initializers import RandomUniform
from tensorflow.python.keras.initializers import RandomUniform as random_uniform
from tensorflow.python.keras.initializers import RandomUniform as uniform
from tensorflow.python.keras.initializers import TruncatedNormal
from tensorflow.python.keras.initializers import TruncatedNormal as truncated_normal
from tensorflow.python.keras.initializers import deserialize
from tensorflow.python.keras.initializers import get
from tensorflow.python.keras.initializers import serialize
del print_function
|
[
"a@bogdanov.co"
] |
a@bogdanov.co
|
4bbc91ad36755f133c5b473d3e59491f7d00cb10
|
8329282a8fda056d705c1af6dbcd0de1ed7ca25e
|
/.history/textutiles/textutiles/views_20210522204110.py
|
615e455ae34300f081f062b014e9c40135cf0c9b
|
[] |
no_license
|
ritikalohia/Django-beginners-
|
c069b16867407ef883bb00c6faf4f601921c118a
|
829e28ab25201853de5c71a10ceff30496afea52
|
refs/heads/main
| 2023-05-04T03:34:29.082656
| 2021-05-22T17:38:21
| 2021-05-22T17:38:21
| 369,869,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,455
|
py
|
#created
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
#params = {'name' : 'Ritika', 'place' : 'Mars'}
return render(request, 'index.html')
#return HttpResponse("Home")
def contact(request):
return render(request, 'contact')
def analyze(request):
#get the text in head
djtext = request.GET.get('text', 'default' )
#check checkbox values
removepunc = request.GET.get('removepunc', 'off')
fullcaps = request.GET.get('fullcaps', 'off')
newlineremover = request.GET.get('newlineremover', 'off')
spaceremover = request.GET.get('spaceremover', 'off'),
charcount = request.GET.get('charcount', 'off')
if removepunc == "on":
#analyzed = djtext
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_'''
analyzed = ""
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
params ={'purpose':'removed punctuations', 'analyzed_text': analyzed}
#analyze the text
return render(request, 'analyze.html', params)
elif(fullcaps == "on"):
analyzed =""
for char in djtext:
analyzed = analyzed + char.upper()
params ={'purpose':'changed to UPPERCASE', 'analyzed_text': analyzed}
#analyze the text
return render(request, 'analyze.html', params)
elif(newlineremover== "on"):
analyzed =""
for char in djtext:
if char != '\n':
analyzed = analyzed + char
params ={'purpose':'Removed new lines', 'analyzed_text': analyzed}
#analyze the text
return render(request, 'analyze.html', params)
elif(spaceremover== "on"):
analyzed =""
for index, char in enumerate(djtext):
if not djtext[index] == " " and djtext[index+1]==" ":
analyzed = analyzed + char
params ={'purpose':'extra space removed', 'analyzed_text': analyzed}
#analyze the text
return render(request, 'analyze.html', params)
elif(charcount== "on"):
a=0
for char in djtext:
a = a + 1
params ={'purpose':'extra space removed', 'analyzed_text': a}
#analyze the text
return render(request, 'analyze.html', params)
else:
return HttpResponse("Error")
# def capfirst(request):
# return HttpResponse("capitalize first")
|
[
"rtklohia@gmail.com"
] |
rtklohia@gmail.com
|
e0d37014e4b7a9b35335efd96f0808e2c0f5f6ac
|
48c07006c95631ccba60bf08942a0ad5f458b453
|
/backend/wallet/admin.py
|
fd30a9aae90dd41aa5ec089a0797784423b75c71
|
[] |
no_license
|
crowdbotics-apps/irmflorida-21805
|
80359102e4cb3f949cbc4261f3345338d4ca5e31
|
6cca6ab134dc423a0dfe6c5180a3310471f44392
|
refs/heads/master
| 2022-12-31T22:17:17.827396
| 2020-10-22T13:09:20
| 2020-10-22T13:09:20
| 306,148,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
from django.contrib import admin
from .models import (
CustomerWallet,
PaymentMethod,
PaymentTransaction,
TaskerPaymentAccount,
TaskerWallet,
)
admin.site.register(CustomerWallet)
admin.site.register(PaymentTransaction)
admin.site.register(PaymentMethod)
admin.site.register(TaskerWallet)
admin.site.register(TaskerPaymentAccount)
# Register your models here.
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
e1a146982bf9b581fb39ab7883f2b8bc9180d00d
|
287e70a3b2382d1b4c3dd4bc3454ec66a7af1ac5
|
/tsuanlo/thongke.py
|
c8719e935443b954dc81c57ceb57dc9dc94cc2cb
|
[] |
no_license
|
twgo/su5pio2
|
9b7c070bf3483312628d3c338949703147189a8a
|
24a567604aec090fdbce11335bbfef56c71f6888
|
refs/heads/master
| 2020-03-11T14:19:23.606168
| 2018-10-16T12:22:26
| 2018-10-16T12:22:26
| 130,050,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,321
|
py
|
from 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器
from builtins import set
import json
from 臺灣言語工具.音標系統.台語 import 新白話字
from 臺灣言語工具.音標系統.閩南語.臺灣閩南語羅馬字拼音 import 臺灣閩南語羅馬字拼音通行韻母表
def su():
imchiat = 0
su = 0
tsuanpooji = set()
tsuanpoojitiau = set()
tsuanpoosu = set()
for liong, 句物件 in enumerate(tsuliau()):
imchiat += len(句物件.篩出字物件())
su += len(句物件.網出詞物件())
for 詞物件 in 句物件.網出詞物件():
if not 詞物件.敢是標點符號():
tshingkhi = True
for 字物件 in 詞物件.篩出字物件():
tailo = 新白話字(字物件.型)
if (
tailo.音標 is not None and
tailo.韻 in 臺灣閩南語羅馬字拼音通行韻母表
):
tsuanpooji.add(字物件.看分詞().strip('0123456789'))
tsuanpoojitiau.add(字物件.看分詞())
else:
tshingkhi = False
if tshingkhi:
tsuanpoosu.add(詞物件.看分詞().lstrip('01'))
with open('tsonghong.json', 'w') as tong:
json.dump(
{
'總音節數(無算標點)': imchiat,
'總詞數(無算標點)': su,
'資料總數(詞、句、段)': liong,
'詞種類(無算標點)': len(tsuanpoosu),
'音節加調種類': len(tsuanpoojitiau),
'音節無調種類': len(tsuanpooji),
},
tong, ensure_ascii=False, sort_keys=True, indent=2
)
with open('tsuanpoojitiau.txt', 'w') as tong:
print('\n'.join(sorted(tsuanpoojitiau)), file=tong)
with open('tsuanpooji.txt', 'w') as tong:
print('\n'.join(sorted(tsuanpooji)), file=tong)
with open('tsuanpoosu.txt', 'w') as tong:
print('\n'.join(sorted(tsuanpoosu)), file=tong)
def tsuliau():
with open('tsuanpooku.txt') as tong:
for tsua in tong.read().split('\n'):
yield 拆文分析器.建立句物件(tsua)
su()
|
[
"ihcaoe@gmail.com"
] |
ihcaoe@gmail.com
|
50602a2a258c39fa894476cd9a9cbbc0de1e795a
|
e27509d78f9bc9746c535b4ed7adec6370741af6
|
/bwdesignworld/magazineisuue/views.py
|
732a1d7f95c369a29873e8f7c6a683ee1d397957
|
[] |
no_license
|
bopopescu/bw
|
deae2323039d70b32a35043361f8562b09bea630
|
1ddb78a280388548ef7dd201a361db710e271791
|
refs/heads/master
| 2022-11-22T18:29:05.581667
| 2019-10-01T03:30:48
| 2019-10-01T03:30:48
| 282,542,998
| 0
| 0
| null | 2020-07-25T23:41:43
| 2020-07-25T23:41:42
| null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
import datetime
from django.utils import timezone
from django.http import HttpResponseRedirect
from django.http import HttpResponse
import json
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger,InvalidPage
from django.conf import settings
# Create your views here.
from magazineisuue.models import Magazine
from bwdesignworld.utils import sidebar_data, category_jump_list, closeDbConnection
def magazineissue_listing(request, year):
meta_title = 'BW defence Magazine – '+str(year)+' Issues'
meta_description = 'BW defence Magazine is one of the most popular and respected News and Updates for defence in India. Here is a list of issues released in '+str(year)
meta_keyword = 'Web Exclusive, News and Updates for defence in India, News and Updates for defence in India'
og_title = 'BW defence Magazine – '+str(year)+' Issues'
og_url = '/magazine -issue'
og_image = settings.AWS_S3_BASE_URL + settings.BUCKET_PATH +'static_bwhr/images/BW-defence-logo.jpg'
category_jumlist = category_jump_list()
magazine_allyear = Magazine.objects.raw("SELECT magazine_id , YEAR(publish_date_m) as years FROM magazine GROUP BY YEAR(publish_date_m) ORDER BY publish_date_m DESC ")
if request.method == 'GET':
if(year!=''):
magazine_listing = Magazine.objects.raw("SELECT * FROM magazine WHERE YEAR(publish_date_m) = '"+year+"' ORDER BY publish_date_m DESC ")
return render(request, 'magazineissue/magazineissue_listing.html', {
'meta_title': meta_title,
'meta_description': meta_description,
'meta_keyword': meta_keyword,
'og_title':og_title,
'og_url':og_url,
'og_image':og_image,
'magazine_allyear':magazine_allyear,
'magazine_listing':magazine_listing,
'category_jumlist':category_jumlist,
'year':year
})
|
[
"kr.sumit89@gmail.com"
] |
kr.sumit89@gmail.com
|
84d900bd249dc56e022df49fe1adc11d8e94cc33
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03106/s203963496.py
|
3326c7676c3537cc9fbbf30aabd9f17c4dbc711a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
A, B, K = map(int, input().split())
I = []
if A < B:
C = A
else:
C = B
for i in range(1, C+1):
if (A % i == 0) & (B % i == 0):
I.append(i)
print(I[-K])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
031beb90b4f484126de2a0aa9d25895b63eba864
|
716abd9e5ba4b72b72cc5f724a6cc0a6ad4390d1
|
/8-Working with Python Modules/35-Introduction-to-Python-Modules.py
|
d82b80c881be39c2f7329e85993ccf5267b6067f
|
[] |
no_license
|
devopstasks/PythonScripting
|
ac45edd72dc134ec3539b962f02dfc866f365ecf
|
48bc37733ae6b3be4e2d64909ffe0962b6908518
|
refs/heads/master
| 2023-03-29T11:18:01.329452
| 2021-04-07T03:25:20
| 2021-04-07T03:25:20
| 350,388,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
'''
======================================================
What is a module?
=> A module is a file containing python definitions and statements.That means module containing
python functions,classes and variables.
What is the use of module?
=> Reusability
ex: If script name is mymodule.py then module name is mymodule
Types of Python modules:
=> Default modules
=> Third-party modules
Note: Import either default or third party modules before using them.
======================================================
=======================================================
=> List all functions and variables of a module using the dir() function
=> Getting help of a particular module
=> from script: print(help(math))
from python command line: help(math)
import math
dir(math)
help(math)
=> install a third-party module
pip install <module-name>
pip install xlrd
pip install xlwt
import xlrd
import xlwt
dir(xlrd)
help(xlrd)
dir(xlwt)
help(xlwt)
======================================================
=======================================================
Method-1
========
import math
print(math.pi)
print(math.pow(3,2))
Method-2
========
import math as m
print(m.pi)
print(m.pow(3,2))
Method-3
========
from math import *
print(pi)
print(pow(3,2))
Method-4
========
from math import pi,pow
print(pi)
print(pow(3,2))
=========
import platform
import math
import sys
import os
import subprocess
or
import platform,math,sys,os,subprocess
======================================================
'''
|
[
"rpadhan2015@gmail.com"
] |
rpadhan2015@gmail.com
|
09fd87d07d10fa12b78b49a16a161ae0c05646ac
|
3b7a07ad2d78e8526ad8ae0767530d98aaff9f3e
|
/backend/core/settings/dev.py
|
5e31a25b6b87f5c7ce7ba9c4302df39b89fc9375
|
[] |
no_license
|
ImmortalViolet/one-oms
|
ba2281acdd63f35eb907651e5aae240c97c16e8b
|
9b89e2773511cb4f9fe37c4cde79e9e3e47464fe
|
refs/heads/master
| 2022-04-12T17:38:59.052337
| 2020-02-28T04:29:34
| 2020-02-28T04:29:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
# -*- coding: utf-8 -*-
# author: timor
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# db
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '../core.db'),
}
}
|
[
"itimor@126.com"
] |
itimor@126.com
|
6ee9e09c43f596747e6f734706f13995d405a1f5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_overruns.py
|
b104b69597f068db56d3556bd4709eb128fbc11a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from xai.brain.wordbase.verbs._overrun import _OVERRUN
#calss header
class _OVERRUNS(_OVERRUN, ):
def __init__(self,):
_OVERRUN.__init__(self)
self.name = "OVERRUNS"
self.specie = 'verbs'
self.basic = "overrun"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
c51305ba5e4d56ee36a2a1cdb829f4a76038d5d3
|
59166105545cdd87626d15bf42e60a9ee1ef2413
|
/test/test_human_gene_location.py
|
ed2c5bf611870f00b36c70bc931cf84d6aa20bd0
|
[] |
no_license
|
mosoriob/dbpedia_api_client
|
8c594fc115ce75235315e890d55fbf6bd555fa85
|
8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc
|
refs/heads/master
| 2022-11-20T01:42:33.481024
| 2020-05-12T23:22:54
| 2020-05-12T23:22:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,901
|
py
|
# coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import dbpedia
from dbpedia.models.human_gene_location import HumanGeneLocation # noqa: E501
from dbpedia.rest import ApiException
class TestHumanGeneLocation(unittest.TestCase):
"""HumanGeneLocation unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test HumanGeneLocation
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = dbpedia.models.human_gene_location.HumanGeneLocation() # noqa: E501
if include_optional :
return HumanGeneLocation(
gene_location_end = [
56
],
gene_location_start = [
56
],
genome_db = [
'0'
],
description = [
'0'
],
on_chromosome = [
56
],
id = '0',
label = [
'0'
],
type = [
'0'
]
)
else :
return HumanGeneLocation(
)
def testHumanGeneLocation(self):
"""Test HumanGeneLocation"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"maxiosorio@gmail.com"
] |
maxiosorio@gmail.com
|
f81a63c4262b48497bf3b90fc7acc5c04f3469e4
|
54f57cf661423b4cb4375ec86557a934ac94a497
|
/proj/run.py
|
c4f36b79ea5076151cfdfb10f7a6911a3fd8a76e
|
[
"MIT"
] |
permissive
|
0xangelo/deep-rl
|
dd87520120e70a5d2325fcd25cdf004ce67f1453
|
9f0c1aafe71852c8973bf1ab732114a3cdbe23ad
|
refs/heads/master
| 2022-02-24T00:21:47.248660
| 2019-06-22T20:51:44
| 2019-06-22T20:51:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,549
|
py
|
"""
The MIT License
Copyright (c) 2018 OpenAI (http://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Adapted from OpenAI's Spinning Up: https://github.com/openai/spinningup
"""
import sys
import os
import os.path as osp
import subprocess
import proj.algorithms
from random import randrange
from proj.utils.exp_grid import ExperimentGrid
from proj.common.models import *
# Command line args that will go to ExperimentGrid.run, and must possess unique
# values (therefore must be treated separately).
RUN_KEYS = ["log_dir", "format_strs", "datestamp"]
def friendly_err(err_msg):
# add whitespace to error message to make it more readable
return "\n\n" + err_msg + "\n\n"
def parse_and_execute_grid_search(cmd, args):
algo = eval("proj.algorithms." + cmd)
# Before all else, check to see if any of the flags is 'help'.
valid_help = ["--help", "-h", "help"]
if any([arg in valid_help for arg in args]):
print("\n\nShowing docstring for spinup." + cmd + ":\n")
print(algo.__doc__)
sys.exit()
def process(arg):
# Process an arg by eval-ing it, so users can specify more
# than just strings at the command line (eg allows for
# users to give functions as args).
try:
return eval(arg)
except NameError:
return arg
# Make first pass through args to build base arg_dict. Anything
# with a '--' in front of it is an argument flag and everything after,
# until the next flag, is a possible value.
arg_dict = dict()
for i, arg in enumerate(args):
assert i > 0 or "--" in arg, friendly_err("You didn't specify a first flag.")
if "--" in arg:
arg_key = arg.lstrip("-")
arg_dict[arg_key] = []
else:
arg_dict[arg_key].append(process(arg))
# Make second pass through, to catch flags that have no vals.
# Assume such flags indicate that a boolean parameter should have
# value True.
for _, v in arg_dict.items():
if len(v) == 0:
v.append(True)
# Final pass: check for the special args that go to the 'run' command
# for an experiment grid, separate them from the arg dict, and make sure
# that they have unique values. The special args are given by RUN_KEYS.
run_kwargs = dict()
for k in RUN_KEYS:
if k in arg_dict:
val = arg_dict[k]
assert len(val) == 1, friendly_err(
"You can only provide one value for %s." % k
)
run_kwargs[k] = val[0]
del arg_dict[k]
# Determine experiment name. If not given by user, will be determined
# by the algorithm name.
if "exp_name" in arg_dict:
assert len(arg_dict["exp_name"]) == 1, friendly_err(
"You can only provide one value for exp_name."
)
exp_name = arg_dict["exp_name"][0]
del arg_dict["exp_name"]
else:
exp_name = "cmd_" + cmd
# Construct and execute the experiment grid.
eg = ExperimentGrid(name=exp_name)
for k, v in arg_dict.items():
eg.add(k, v)
eg.run(algo, **run_kwargs)
if __name__ == "__main__":
cmd = sys.argv[1]
valid_algos = [
"vanilla",
"natural",
"trpo",
"a2c",
"ppo",
"acktr",
"a2c_kfac",
"ddpg",
"td3",
"sac",
]
valid_utils = ["viskit/frontend", "plot", "sim_policy", "record_policy"]
valid_cmds = valid_algos + valid_utils
assert (
cmd in valid_cmds
), "Select an algorithm or utility which is implemented in proj."
if cmd in valid_algos:
args = sys.argv[2:]
parse_and_execute_grid_search(cmd, args)
elif cmd in valid_utils:
# Execute the correct utility file.
runfile = osp.join(osp.abspath(osp.dirname(__file__)), "utils", cmd + ".py")
args = [sys.executable if sys.executable else "python", runfile] + sys.argv[2:]
subprocess.check_call(args, env=os.environ)
|
[
"angelolovatto@gmail.com"
] |
angelolovatto@gmail.com
|
d5c530b751dafafc0517165d510d97d4829e610b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02860/s022291938.py
|
d07ef112fa467f50552ffd2851692df5d30addee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
N = int(input())
S = str(input())
flag = False
half = (N+1) // 2
if S[:half] == S[half:N]:
flag = True
if flag:
print("Yes")
else:
print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f1d0157b2ab380f994197bcf15eb45900e508175
|
db9ff8accaa4d8d4a96d3f9122c0fdc5e83ea2a5
|
/test/test_inventory_location_full.py
|
45ffb25097659f31398e3148e7d153ea0d03eafd
|
[] |
no_license
|
agtt/ebay-openapi-inventory
|
4754cdc8b6765acdb34f6b8f89b017ccbc6b1d2b
|
d990c26f16e811431892ac6401c73c4599c2d414
|
refs/heads/master
| 2023-06-17T10:53:43.204075
| 2021-07-14T18:32:38
| 2021-07-14T18:32:38
| 386,039,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
"""
Inventory API
The Inventory API is used to create and manage inventory, and then to publish and manage this inventory on an eBay marketplace. There are also methods in this API that will convert eligible, active eBay listings into the Inventory API model. # noqa: E501
The version of the OpenAPI document: 1.13.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.location_details import LocationDetails
from openapi_client.model.operating_hours import OperatingHours
from openapi_client.model.special_hours import SpecialHours
globals()['LocationDetails'] = LocationDetails
globals()['OperatingHours'] = OperatingHours
globals()['SpecialHours'] = SpecialHours
from openapi_client.model.inventory_location_full import InventoryLocationFull
class TestInventoryLocationFull(unittest.TestCase):
"""InventoryLocationFull unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInventoryLocationFull(self):
"""Test InventoryLocationFull"""
# FIXME: construct object with mandatory attributes with example values
# model = InventoryLocationFull() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"csd@hotmail.com.tr"
] |
csd@hotmail.com.tr
|
aeeeb349ebfd16e04ca2e0194f5292de498ea198
|
bb465b891e5bd14b2be2c6ae008417106cc1d9bb
|
/pycozmo/audiokinetic/soundbanksinfo.py
|
bdebf47c9b23d44c2f5aa0606371bf4afe2ea978
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ReidsTech/pycozmo
|
4393b157e7a8fba13bc3452226ec45cac5c5b80b
|
1b6dcd9b869a3784f1d8b02e820bb033f95fd13a
|
refs/heads/master
| 2023-02-10T22:45:43.789691
| 2020-11-12T14:39:52
| 2020-11-12T14:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,733
|
py
|
"""
AudioKinetic WWise SoundbanksInfo.xml representation and reading.
See assets/cozmo_resources/sound/SoundbanksInfo.xml
"""
from typing import Dict, Any, Union, TextIO
import xml.etree.ElementTree as et # noqa
from . import exception
__all__ = [
"EventInfo",
"FileInfo",
"SoundBankInfo",
"load_soundbanksinfo",
]
class EventInfo:
""" Event representation in SoundbanksInfo.xml . """
__slots__ = [
"soundbank_id",
"id",
"name",
"object_path",
]
def __init__(self, soundbank_id: int, event_id: int, name: str, object_path: str):
self.soundbank_id = int(soundbank_id)
self.id = int(event_id)
self.name = str(name)
self.object_path = str(object_path)
class FileInfo:
""" File representation in SoundbanksInfo.xml . """
__slots__ = [
"soundbank_id",
"id",
"name",
"path",
"embedded",
"prefetch_size",
]
def __init__(self, soundbank_id: int, file_id: int, name: str, path: str, embedded: bool, prefetch_size: int):
self.soundbank_id = int(soundbank_id)
self.id = int(file_id)
self.name = str(name)
self.path = str(path)
self.embedded = bool(embedded)
self.prefetch_size = int(prefetch_size)
def __eq__(self, other: "FileInfo") -> bool:
res = True
res = res and self.soundbank_id == other.soundbank_id
res = res and self.id == other.id
res = res and self.name == other.name
# There are many files that are both embedded and streamed.
# res = res and self.embedded == other.embedded
# res = res and self.prefetch_size == other.prefetch_size
return res
class SoundBankInfo:
""" SoundBank representation in SoundbanksInfo.xml . """
__slots__ = [
"id",
"name",
"path",
"language",
"object_path",
]
def __init__(self, soundbank_id: int, name: str, path: str, language: str, object_path: str):
self.id = int(soundbank_id)
self.name = str(name)
self.path = str(path)
self.language = str(language)
self.object_path = str(object_path)
def load_soundbanksinfo(fspec: Union[str, TextIO]) -> Dict[int, Any]:
""" Load SoundbanksInfo.xml and return a dictionary of parsed Info objects. """
try:
tree = et.parse(fspec)
except et.ParseError as e:
raise exception.AudioKineticFormatError("Failed to parse SoundbanksInfo file.") from e
root = tree.getroot()
# Load StreamedFiles.
streamed_files = {}
for file in root.findall("./StreamedFiles/File"):
file_id = int(file.get("Id"))
assert file_id not in streamed_files
streamed_files[file_id] = {
"id": file_id,
"language": file.get("Language"),
"name": file.find("ShortName").text,
"path": file.find("Path").text,
}
# Load SoundBanks
objects = {}
for soundbank_node in root.findall("./SoundBanks/SoundBank"):
# Create SoundBankInfo object.
soundbank_id = int(soundbank_node.get("Id"))
language = soundbank_node.get("Language")
soundbank = SoundBankInfo(
soundbank_id,
soundbank_node.find("ShortName").text,
soundbank_node.find("Path").text,
language,
soundbank_node.find("ObjectPath").text)
assert soundbank_id not in objects
objects[soundbank_id] = soundbank
# Create EventInfo objects.
events = soundbank_node.findall("./IncludedEvents/Event")
for event_node in events:
event_id = int(event_node.get("Id"))
event = EventInfo(
soundbank_id,
event_id,
event_node.get("Name"),
event_node.get("ObjectPath"))
assert event_id not in objects
objects[event_id] = event
# Create FileInfo objects for streamed files.
files = soundbank_node.findall("./ReferencedStreamedFiles/File")
for file_node in files:
file_id = int(file_node.get("Id"))
streamed_file = streamed_files[file_id]
# The file and SoundBank languages may differ.
# assert streamed_file["language"] == language
file = FileInfo(
soundbank_id,
file_id,
streamed_file["name"],
streamed_file["path"],
False,
-1)
assert file_id not in objects
objects[file_id] = file
# Create FileInfo objects for embedded files.
files = soundbank_node.findall("./IncludedMemoryFiles/File")
for file_node in files:
file_id = int(file_node.get("Id"))
# The file and SoundBank languages may differ.
# assert file_node.get("Language") == language
prefetch_size_node = file_node.find("PrefetchSize")
prefetch_size = int(prefetch_size_node.text) if prefetch_size_node is not None else -1
file = FileInfo(
soundbank_id,
file_id,
file_node.find("ShortName").text,
file_node.find("Path").text,
True,
prefetch_size)
# assert file_id not in objects
if file_id in objects:
# Many files exist externally and as a "prefetched" embedded file that is truncated.
assert file == objects[file_id]
if not file.embedded:
objects[file_id] = file
else:
objects[file_id] = file
return objects
|
[
"zayfod@gmail.com"
] |
zayfod@gmail.com
|
0ed43fd97bc1aaa0a8dd69685412961f634b5081
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/twosum_20200716164442.py
|
d7b038af3c1ba2d0728e86c4a678d19d439a0ad0
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
def twoSum(nums,target):
# loop through the array
# add the two nums checking whethere they are equal to the target
# when you get one that is equal to the target append the indices...
index = []
for i in range(len(nums)-1):
for j in range(i+1,len(nums)):
if nums[i]+ nums[j] == target:
index.append(nums[i])
index.append(nums[j])
print(index)
def two(nums,S):
sums = []
check = {}
for i in range(len(nums)):
minus = S - nums[i]
if str(minus) not in
twoSum([2, 7, 11, 15],9)
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
250a4f07e7a707025c6c2dca69887c9dd8db0074
|
8048d04a51cd8b14b9de52c1ab4c0b670a8d8cc9
|
/blog/models.py
|
bda80cb88c8d0a1f1c39f18ba8f6672b578e0796
|
[] |
no_license
|
earvingemenez/myblog
|
2520bb08278272cdb1d3916b07c1b5167d6f5245
|
18b560efc0c0e20ecca0d0383b727785d713f776
|
refs/heads/master
| 2021-01-19T22:43:21.359989
| 2017-04-20T10:14:37
| 2017-04-20T10:14:37
| 88,848,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
from __future__ import unicode_literals
from django.db import models
class Blog(models.Model):
title = models.CharField(max_length=250)
content = models.TextField()
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "{}".format(self.title)
|
[
"earvin.gemenez@gmail.com"
] |
earvin.gemenez@gmail.com
|
31583fc5b7d6736e1fd571f3c2c2951d935d9380
|
1825283527f5a479204708feeaf55f4ab6d1290b
|
/leetcode/python/248/original/248.strobogrammatic-number-iii.py
|
f15a21644c1adb868cb39614cde884e402e9deb3
|
[] |
no_license
|
frankieliu/problems
|
b82c61d3328ffcc1da2cbc95712563355f5d44b5
|
911c6622448a4be041834bcab25051dd0f9209b2
|
refs/heads/master
| 2023-01-06T14:41:58.044871
| 2019-11-24T03:47:22
| 2019-11-24T03:47:22
| 115,065,956
| 1
| 0
| null | 2023-01-04T07:25:52
| 2017-12-22T02:06:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 837
|
py
|
#
# @lc app=leetcode id=248 lang=python3
#
# [248] Strobogrammatic Number III
#
# https://leetcode.com/problems/strobogrammatic-number-iii/description/
#
# algorithms
# Hard (35.88%)
# Total Accepted: 18.6K
# Total Submissions: 51.7K
# Testcase Example: '"50"\n"100"'
#
# A strobogrammatic number is a number that looks the same when rotated 180
# degrees (looked at upside down).
#
# Write a function to count the total strobogrammatic numbers that exist in the
# range of low <= num <= high.
#
# Example:
#
#
# Input: low = "50", high = "100"
# Output: 3
# Explanation: 69, 88, and 96 are three strobogrammatic numbers.
#
# Note:
# Because the range might be a large number, the low and high numbers are
# represented as string.
#
#
class Solution:
def strobogrammaticInRange(self, low: str, high: str) -> int:
|
[
"frankie.y.liu@gmail.com"
] |
frankie.y.liu@gmail.com
|
a508f7fff35fe197202d0930f5a4a128153dafe8
|
5d1c43bb4881039f198eedcee2ceb101b406e0a0
|
/Django/myvenv/Scripts/rst2pseudoxml.py
|
14b4d31f246fab38616c63e1ebada18b21f34d47
|
[] |
no_license
|
MunSeoHee/Likelion_Gachon_2020
|
46155b1686a245a59c5664f7726ac754b7079e4b
|
e0e48845fdb0e4aa2365e7c47e29880a27f0f261
|
refs/heads/master
| 2021-04-10T09:51:06.618980
| 2020-12-07T10:06:43
| 2020-12-07T10:06:43
| 248,927,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!c:\users\munseohee\desktop\likelion_gachon_2020\django\myvenv\scripts\python.exe
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
|
[
"nansh9815@naver.com"
] |
nansh9815@naver.com
|
0358c0a7d390749d6a438898355cb946deba8891
|
6b9b032a5516c8d7dbb26deeb1b189022f8f9411
|
/LeetCode/math/326.3的幂.py
|
c748ea5a012df0e8d72d3e38e176aa8eefb1eadb
|
[] |
no_license
|
mrmenand/Py_transaction
|
84db99a0010ae90f43fba6b737d7035e48af55fb
|
7e82422c84ad699805cc12568b8d3d969f66a419
|
refs/heads/master
| 2021-07-13T21:15:03.714689
| 2020-06-21T11:49:31
| 2020-06-21T11:49:31
| 176,281,954
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
# 326. 3的幂
class Solution:
def isPowerOfThree(self, n: int) -> bool:
ret = 1
while ret < n:
ret *= 3
return ret == n
|
[
"1006024749@qq.com"
] |
1006024749@qq.com
|
5cdc9a19af934f9266f1825c6652b217388655ed
|
111b4a8e40afb25fe9c906e89b3d31ccde18447b
|
/07b - Iteraties-WhileLus/Kassa.py
|
4a0ca6b9223b11b1bd5165e99b762bf2937f5301
|
[] |
no_license
|
astilleman/Informatica5
|
722276619b5c2a80e03ad97a6c67bdb25f12bda2
|
9cbd82055c621c25ec372547c179bcf045f6629f
|
refs/heads/master
| 2020-03-28T03:19:12.206052
| 2019-05-16T08:05:54
| 2019-05-16T08:05:54
| 147,637,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#bewerking
totale_prijs = 0
prijs = float(input('Prijs van het product: '))
while prijs > 0:
totale_prijs += prijs
prijs = float(input('Prijs van het product: '))
mes = 'De totale prijs is € {:.2f}'.format(totale_prijs)
#uitvoer
print(mes)
|
[
"43027764+astilleman@users.noreply.github.com"
] |
43027764+astilleman@users.noreply.github.com
|
a77d9836bd428d402dfa9f88a23deaf8f561556e
|
067f015c6b70c18c78dc9c6976c10d77b2f6bb03
|
/tests/test_extension.py
|
ef0f38593557e3ffde9f7323b34d1e17c8252893
|
[
"Apache-2.0"
] |
permissive
|
SolarisYan/jupyterlab_autoversion
|
d146a99f8e975def441a2a22ceb89c27f1510123
|
f16a1f0833cd278df390e7cc3c72aa4569dd4b8e
|
refs/heads/master
| 2020-04-26T12:17:01.191856
| 2019-02-14T14:07:01
| 2019-02-14T14:07:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
# for Coverage
from mock import patch, MagicMock
from jupyterlab_autoversion.extension import load_jupyter_server_extension
class TestExtension:
def setup(self):
pass
# setup() before each test method
def teardown(self):
pass
# teardown() after each test method
@classmethod
def setup_class(cls):
pass
# setup_class() before any methods in this class
@classmethod
def teardown_class(cls):
pass
# teardown_class() after any methods in this class
def test_load_jupyter_server_extension(self):
m = MagicMock()
m.web_app.settings = {}
m.web_app.settings['base_url'] = '/test'
load_jupyter_server_extension(m)
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
59179e3611dfd909c95ac81a1ccb88b8d87bd48d
|
aec6d287856a06e8b366216b5562b76d46978dc6
|
/specialelaven.py
|
d85f1f38564a80b40e58bd012bb94922693c77aa
|
[] |
no_license
|
subbuinti/python_practice
|
b724a4e562b63c66d23b67093219d5a4bf1e1f0f
|
ffdab933586609b3760681effccdfead0b90033f
|
refs/heads/master
| 2023-07-12T17:47:00.773554
| 2021-08-31T14:26:51
| 2021-08-31T14:26:51
| 395,187,797
| 0
| 0
| null | 2021-08-31T14:22:56
| 2021-08-12T03:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 219
|
py
|
number = int(input())
is_multiply_by_11 = ((number % 11) == 0)
is_multiple_times_by_11 =((number % 11) == 1)
if is_multiply_by_11 or is_multiple_times_by_11:
print("Special Eleven")
else:
print("Normal Number")
|
[
"subbuinti1@gmail.com"
] |
subbuinti1@gmail.com
|
e3fbaa41424466c1205d5c08baabfd0e8be8d179
|
45467e07e77131f631d0865046dcc4d18f483601
|
/src/Codeforces/manthan_codefest18/B.py
|
d3ed1ae6fd8350c021bcad1b6d526a63371793da
|
[] |
no_license
|
bvsbrk/Algos
|
98374e094bd3811579276d25a82bbf2c0f046d96
|
cbb18bce92054d57c0e825069ef7f2120a9cc622
|
refs/heads/master
| 2021-09-25T10:01:59.323857
| 2018-10-20T16:07:07
| 2018-10-20T16:07:07
| 98,708,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
if __name__ == '__main__':
n, s = [int(__) for __ in input().strip().split()]
arr = [int(__) for __ in input().strip().split()]
arr.sort()
mid = n // 2
ans = 0
if s == arr[mid]:
print(ans)
elif s > arr[mid]:
while mid < n and arr[mid] < s:
ans += s - arr[mid]
mid += 1
print(ans)
else:
while mid >= 0 and arr[mid] > s:
ans += arr[mid] - s
mid -= 1
print(ans)
|
[
"v.ramakoteswararao2015@vit.ac.in"
] |
v.ramakoteswararao2015@vit.ac.in
|
0e2d85e036627e1e34c67e17618b48e652a18d05
|
e3a6939a2faaf9bfd7ed21e7a877d2bd2125b130
|
/projects/migrations/0006_alter_portfolio_main_image.py
|
802a68ae6f98fe2b1e907debaaf8c9c2427ecd06
|
[] |
no_license
|
furkalokbu/PortfolioTime
|
b133a64ec1472a12b878b87cf8e0706fdf39a16a
|
c4233930cd0988a80c65edf2079d4a560987d225
|
refs/heads/main
| 2023-04-29T21:24:30.629206
| 2021-05-14T15:49:14
| 2021-05-14T15:49:14
| 367,320,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
# Generated by Django 3.2.2 on 2021-05-13 18:23
from django.db import migrations, models
import djlime.utils
class Migration(migrations.Migration):
dependencies = [
("projects", "0005_portfolio_main_image"),
]
operations = [
migrations.AlterField(
model_name="portfolio",
name="main_image",
field=models.ImageField(
blank=True,
help_text="recommended size 1000x665",
null=True,
upload_to=djlime.utils.get_file_path,
verbose_name="main image",
),
),
]
|
[
"furkalokbu@gmail.com"
] |
furkalokbu@gmail.com
|
c11b62bf5d9810407247cd30ec391cedacc33a4b
|
8780bc7f252f14ff5406ce965733c099034920b7
|
/pyCode/python3.6网络爬虫实战/16-使用Selenium模拟浏览器抓取淘宝商品美食信息/16-使用Selenium模拟浏览器抓取淘宝商品美食信息.py
|
31abbc0c226bf81b218662c13dc366640a3d2005
|
[] |
no_license
|
13661892653/workspace
|
5e4e458d31b9355c67d67ba7d9faccbcc1ac9f6b
|
17960becabb3b4f0fc30009c71a11c4f7a5f8330
|
refs/heads/master
| 2020-12-24T20:00:15.541432
| 2018-08-14T13:56:15
| 2018-08-14T13:56:15
| 86,225,975
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,070
|
py
|
#coding=utf-8
#Version:Python-3.6.0
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery
import pymongo
import re
from config import *
browser = webdriver.Chrome()
client=pymongo.MongoClient(MONGO_URL)
db=client[MONGO_DB]
def search():
try:
browser.get('https://www.taobao.com/')
input = WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "#q")))
submit=WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.CSS_SELECTOR,'#J_TSearchForm > div.search-button > button')))
input.send_keys('婴儿用品')
submit.click()
total=WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,'#mainsrp-pager > div > div > div > div.total')))
get_products()
return total.text
except TimeoutException:
print('超时啦.............',TimeoutException)
return search()
def next_page(page_number):
try:
input = WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.form > input")))
submit = WebDriverWait(browser, 10).until(
EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))
input.clear()
input.send_keys(page_number)
submit.click()
WebDriverWait(browser, 10).until(
EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'),str(page_number))
)
get_products()
except TimeoutException:
print('超时啦.............')
next_page(page_number)
def get_products():
print("正在获取详情信息...")
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR,'#mainsrp-itemlist .items .item'))
)
html=browser.page_source
doc=PyQuery(html)
items=doc('#mainsrp-itemlist .items .item').items()
for item in items:
product={
'首图': item.find('.pic .img').attr('src'),
'价格': item.find('.price').text().replace('\n',' '),
'购买人数': item.find('.deal-cnt').text()[0:-3],
'宝贝标题': item.find('.title').text().replace('\n',' '),
'店铺名称': item.find('.shop').text(),
'店铺位置': item.find('.location').text()
}
print(product)
save_to_mongo(product)
def save_to_mongo(result):
if result:
if db[MONGO_TABLE].insert(result):
print('插入成功')
return True
return False
def main():
    """Entry point: run the initial search, then walk every result page."""
    summary = search()
    # The pager text contains the page count as its only number.
    last_page = int(re.compile('(\d+)').search(summary).group(1))
    for page in range(2, last_page + 1):
        next_page(page)
if __name__=='__main__':
    main()
|
[
"470563152@qq.com"
] |
470563152@qq.com
|
734c3ae12014f9b1251ed08b02aa2812c6836d5a
|
f159aeec3408fe36a9376c50ebb42a9174d89959
|
/1094.Car-Pooling.py
|
ca906b5c2ebfe85078f842d56167aa7405bbd8e4
|
[
"MIT"
] |
permissive
|
mickey0524/leetcode
|
83b2d11ab226fad5da7198bb37eeedcd8d17635a
|
fc5b1744af7be93f4dd01d6ad58d2bd12f7ed33f
|
refs/heads/master
| 2023-09-04T00:01:13.138858
| 2023-08-27T07:43:53
| 2023-08-27T07:43:53
| 140,945,128
| 27
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
# https://leetcode.com/problems/car-pooling/
# Easy (56.22%)
# Total Accepted: 3,367
# Total Submissions: 5,989
class Solution(object):
    def carPooling(self, trips, capacity):
        """
        :type trips: List[List[int]]  # each trip is [num_passengers, start, end]
        :type capacity: int
        :rtype: bool

        Sweep-line over pickup/drop-off events. Occupancy only changes at a
        trip's start (+num) or end (-num), so accumulating those deltas in
        location order gives the onboard count at every point. This replaces
        the original O(n^2) rescan of pending trips with O(n log n).
        """
        # location -> net passenger change at that location
        delta = {}
        for num, start, end in trips:
            delta[start] = delta.get(start, 0) + num
            delta[end] = delta.get(end, 0) - num
        onboard = 0
        for location in sorted(delta):
            onboard += delta[location]
            if onboard > capacity:
                return False
        return True
|
[
"buptbh@163.com"
] |
buptbh@163.com
|
ec09c23ed955776b9848aa5c94f2f4b1c92574bf
|
3f7e2c4e9a4dcee8f788150e1ead8fea1ea29b9d
|
/src/spaceone/statistics/service/schedule_service.py
|
90d817258de4d4e8093583d0e0619f546f8f4ec0
|
[
"Apache-2.0"
] |
permissive
|
choonho/statistics
|
8fd5d5cb88c7b1d028cc1074cbafebfcd38e1371
|
31fbae2d0772a2e8b717ac12c8de9edd9d8f1734
|
refs/heads/master
| 2023-03-04T04:16:21.121928
| 2020-08-18T12:54:26
| 2020-08-18T12:54:26
| 289,881,803
| 0
| 0
| null | 2020-08-24T09:17:07
| 2020-08-24T09:17:07
| null |
UTF-8
|
Python
| false
| false
| 7,013
|
py
|
import logging
import copy
from spaceone.core.service import *
from spaceone.statistics.error import *
from spaceone.statistics.manager.resource_manager import ResourceManager
from spaceone.statistics.manager.schedule_manager import ScheduleManager
_LOGGER = logging.getLogger(__name__)
@authentication_handler
@authorization_handler
@event_handler
class ScheduleService(BaseService):
    """Service layer for statistics schedules (CRUD, state toggling, stats).

    All public methods take a `params` dict validated by the decorators;
    persistence is delegated to ScheduleManager, query validation to
    ResourceManager.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Managers are resolved through the service locator supplied by BaseService.
        self.resource_mgr: ResourceManager = self.locator.get_manager('ResourceManager')
        self.schedule_mgr: ScheduleManager = self.locator.get_manager('ScheduleManager')
    @transaction
    @check_required(['topic', 'options', 'schedule', 'domain_id'])
    def add(self, params):
        """Add schedule for statistics
        Args:
            params (dict): {
                'topic': 'str',
                'options': 'dict',
                'schedule': 'dict',
                'tags': 'dict',
                'domain_id': 'str'
            }
        Returns:
            schedule_vo
        """
        domain_id = params['domain_id']
        # deepcopy: _verify_query_option mutates nested query keys on the else path.
        options = copy.deepcopy(params['options'])
        schedule = params['schedule']
        self._check_schedule(schedule)
        self._verify_query_option(options, domain_id)
        return self.schedule_mgr.add_schedule(params)
    @transaction
    @check_required(['schedule_id', 'domain_id'])
    def update(self, params):
        """Update schedule
        Args:
            params (dict): {
                'schedule_id': 'str',
                'schedule': 'dict',
                'tags': 'dict',
                'domain_id': 'str'
            }
        Returns:
            schedule_vo
        """
        schedule = params.get('schedule')
        self._check_schedule(schedule)
        return self.schedule_mgr.update_schedule(params)
    @transaction
    @check_required(['schedule_id', 'domain_id'])
    def enable(self, params):
        """Enable schedule
        Args:
            params (dict): {
                'schedule_id': 'str',
                'domain_id': 'str'
            }
        Returns:
            schedule_vo
        """
        domain_id = params['domain_id']
        schedule_id = params['schedule_id']
        schedule_vo = self.schedule_mgr.get_schedule(schedule_id, domain_id)
        return self.schedule_mgr.update_schedule_by_vo({'state': 'ENABLED'}, schedule_vo)
    @transaction
    @check_required(['schedule_id', 'domain_id'])
    def disable(self, params):
        """Disable schedule
        Args:
            params (dict): {
                'schedule_id': 'str',
                'domain_id': 'str'
            }
        Returns:
            schedule_vo
        """
        domain_id = params['domain_id']
        schedule_id = params['schedule_id']
        schedule_vo = self.schedule_mgr.get_schedule(schedule_id, domain_id)
        return self.schedule_mgr.update_schedule_by_vo({'state': 'DISABLED'}, schedule_vo)
    @transaction
    @check_required(['schedule_id', 'domain_id'])
    def delete(self, params):
        """Delete schedule
        Args:
            params (dict): {
                'schedule_id': 'str',
                'domain_id': 'str'
            }
        Returns:
            None
        """
        self.schedule_mgr.delete_schedule(params['schedule_id'], params['domain_id'])
    @transaction
    @check_required(['schedule_id', 'domain_id'])
    def get(self, params):
        """Get schedule
        Args:
            params (dict): {
                'schedule_id': 'str',
                'domain_id': 'str',
                'only': 'list'
            }
        Returns:
            schedule_vo
        """
        return self.schedule_mgr.get_schedule(params['schedule_id'], params['domain_id'], params.get('only'))
    @transaction
    @check_required(['domain_id'])
    @append_query_filter(['schedule_id', 'topic', 'state', 'data_source_id', 'resource_type', 'domain_id'])
    @append_keyword_filter(['schedule_id', 'topic', 'resource_type'])
    def list(self, params):
        """ List schedules
        Args:
            params (dict): {
                'schedule_id': 'str',
                'topic': 'str',
                'state': 'str',
                'data_source_id': 'str',
                'resource_type': 'str',
                'domain_id': 'str',
                'query': 'dict (spaceone.api.core.v1.Query)'
            }
        Returns:
            schedule_vos (object)
            total_count
        """
        query = params.get('query', {})
        return self.schedule_mgr.list_schedules(query)
    @transaction
    @check_required(['query', 'domain_id'])
    @append_query_filter(['domain_id'])
    def stat(self, params):
        """
        Args:
            params (dict): {
                'domain_id': 'str',
                'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
            }
        Returns:
            values (list) : 'list of statistics data'
        """
        query = params.get('query', {})
        return self.schedule_mgr.stat_schedules(query)
    @transaction
    @append_query_filter([])
    def list_domains(self, params):
        """ This is used by Scheduler
        Returns:
            results (list)
            total_count (int)
        """
        mgr = self.locator.get_manager('ScheduleManager')
        query = params.get('query', {})
        result = mgr.list_domains(query)
        return result
    @staticmethod
    def _check_schedule(schedule):
        # Exactly one scheduling option (e.g. cron OR interval) is allowed.
        if schedule and len(schedule) > 1:
            raise ERROR_SCHEDULE_OPTION()
    @staticmethod
    def _check_query_option(options):
        # Both keys are mandatory for a statistics query definition.
        if 'resource_type' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='option.resource_type')
        if 'query' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='option.query')
    def _verify_query_option(self, options, domain_id):
        """Dry-run the schedule's query definition so bad configs fail at add() time."""
        self._check_query_option(options)
        resource_type = options['resource_type']
        query = options['query']
        distinct = query.get('distinct')
        join = options.get('join', [])
        formulas = options.get('formulas', [])
        sort = query.get('sort')
        page = query.get('page', {})
        limit = query.get('limit')
        has_join_or_formula = len(join) > 0 or len(formulas) > 0
        if distinct:
            # distinct queries cannot be combined with join/formula post-processing
            if has_join_or_formula:
                raise ERROR_STATISTICS_DISTINCT()
        else:
            if has_join_or_formula:
                # sort/page/limit are applied after the join, not in the base stat call
                query['sort'] = None
                query['page'] = None
                query['limit'] = None
            response = self.resource_mgr.stat(resource_type, query, domain_id)
            if has_join_or_formula:
                results = response.get('results', [])
                self.resource_mgr.join_and_execute_formula(results, resource_type, query, join,
                                                           formulas, sort, page, limit, domain_id)
|
[
"whdalsrnt@megazone.com"
] |
whdalsrnt@megazone.com
|
d129e4c0812fa19f8e5a6c71e7e94860e1283e5f
|
c1f1900d0f1522cfb6c0148ccd1138e3a3503ba8
|
/generativeopenset/options.py
|
1ccde89c0051407dab95ba2371390693424f4dd0
|
[] |
no_license
|
yunruiguo/counterfactual-openset
|
4252c79bc799de0856cf019b4f13308e737a137c
|
fdba67466a877ed81de645b6c856d6f0a36006db
|
refs/heads/master
| 2023-02-28T21:54:44.175036
| 2021-02-02T14:02:00
| 2021-02-02T14:02:00
| 335,285,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
import os
import subprocess
import json
from pprint import pprint
def save_options(options):
    """Write experiment options to <result_dir>/params.json.

    Stamps the current code version into the options (unless already set)
    and creates the result directory if needed. `result_dir` itself is
    deliberately excluded from the saved file.
    """
    # Record which code version produced these options, in case an option's
    # meaning changes in a later revision.
    if 'version' not in options:
        options['version'] = get_code_version()
    result_dir = options['result_dir']
    if not os.path.exists(result_dir):
        print("Creating result directory {}".format(result_dir))
        os.makedirs(result_dir)
    filename = os.path.join(result_dir, 'params.json')
    with open(filename, 'w') as fp:
        print("Saving options to {}".format(filename))
        serializable = options.copy()
        # result_dir is always read from the command line, never from disk.
        del serializable['result_dir']
        json.dump(serializable, fp, indent=2, sort_keys=True)
def load_options(options):
    """Merge persisted params from result_dir on top of *options* and return it.

    Values stored on disk win over the passed-in defaults; result_dir is
    expanded (~) afterwards and the final dict is pretty-printed.
    """
    print("Resuming existing experiment at {} with options:".format(options['result_dir']))
    saved = json.load(open(get_param_path(options['result_dir'])))
    options.update(saved)
    options['result_dir'] = os.path.expanduser(options['result_dir'])
    pprint(options)
    return options
def get_param_path(result_dir):
    """Return the params file inside result_dir.

    Prefers params.json over default_params.json; raises ValueError when
    neither exists.
    """
    for candidate in ('params.json', 'default_params.json'):
        path = os.path.join(result_dir, candidate)
        if os.path.exists(path):
            return path
    raise ValueError("Could not find {}/params.json".format(result_dir))
def get_current_epoch(result_dir):
    """Return the highest epoch number among <result_dir>/checkpoints/*.pth.

    Epoch numbers are taken from the last '_'-separated token of the file
    stem (e.g. 'gen_28.pth' -> 28). Files without a numeric suffix count
    as epoch 0; returns 0 when no .pth files exist.
    """
    checkpoints_path = os.path.join(result_dir, 'checkpoints')
    filenames = os.listdir(checkpoints_path)
    model_filenames = [f for f in filenames if f.endswith('.pth')]
    if not model_filenames:
        return 0
    def filename_to_epoch(filename):
        # Bug fix: the original used filename.rstrip('.pth'), which strips the
        # *character set* {., p, t, h} — a stem ending in any of those letters
        # (e.g. 'depth_3.pth') was mangled. splitext removes only the extension.
        stem = os.path.splitext(filename)[0]
        tokens = stem.split('_')
        try:
            return int(tokens[-1])
        except ValueError:
            return 0
    return max(filename_to_epoch(f) for f in model_filenames)
def get_code_version():
    """Return the git HEAD commit hash of this source tree, or 'unknown'.

    Runs `git rev-parse HEAD` with this file's directory as cwd so the
    lookup works regardless of the caller's working directory.
    """
    source_dir = os.path.dirname(__file__)
    try:
        revision = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=source_dir)
    except subprocess.CalledProcessError:
        print("Warning: Failed git rev-parse, current code version unknown")
        return "unknown"
    return revision.decode('utf-8').strip()
|
[
"nealla@lwneal.com"
] |
nealla@lwneal.com
|
42e89f451bbe4d4030bb30fcf32c0531c44d827a
|
147d6678b8c99bd1e18b20814f259dc25a395ca6
|
/python daily coding/2020.4.16 (문자열)/2675번 (문자열 반복).py
|
fd8d8f2aec4445d09f7e86b6a80d085465055c18
|
[] |
no_license
|
omy5123/Oh-min-young
|
7759cf869720d58fb07edc0e8f5a9b013afacc95
|
7db08ab828cc28cb9f477ea5410a48245a156fef
|
refs/heads/master
| 2021-05-19T07:08:01.379930
| 2021-01-17T07:51:49
| 2021-01-17T07:51:49
| 251,577,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
"""
문제
문자열 S를 입력받은 후에, 각 문자를 R번 반복해 새 문자열 P를 만든 후 출력하는 프로그램을 작성하시오. 즉, 첫 번째 문자를 R번 반복하고, 두 번째 문자를 R번 반복하는 식으로 P를 만들면 된다. S에는 QR Code "alphanumeric" 문자만 들어있다.
QR Code "alphanumeric" 문자는 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\$%*+-./: 이다.
입력
첫째 줄에 테스트 케이스의 개수 T(1 ≤ T ≤ 1,000)가 주어진다. 각 테스트 케이스는 반복 횟수 R(1 ≤ R ≤ 8), 문자열 S가 공백으로 구분되어 주어진다. S의 길이는 적어도 1이며, 20글자를 넘지 않는다.
출력
각 테스트 케이스에 대해 P를 출력한다.
예제 입력 1
2
3 ABC
5 /HTP
예제 출력 1
AAABBBCCC
/////HHHHHTTTTTPPPPP
"""
a = int(input())
for i in range(a):
b = list(map(str, input().split()))
b1 = int(b[0])
c = list(map(str, b[1]))
for j in range(len(c)):
print(c[j] * b1, end='')
print()
|
[
"omy5123@naver.com"
] |
omy5123@naver.com
|
72a5fad825c61fa86d5ef8ed291f73492c1c0c5f
|
df2cbe914f463ad050d7ed26194424afbe3a0a52
|
/addons/website_sale_delivery/models/sale_order.py
|
8348b35afb5921f18f137f6b7cf52a75f79e7da7
|
[
"Apache-2.0"
] |
permissive
|
SHIVJITH/Odoo_Machine_Test
|
019ed339e995be980606a2d87a63312ddc18e706
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
refs/heads/main
| 2023-07-16T16:23:14.300656
| 2021-08-29T11:48:36
| 2021-08-29T11:48:36
| 401,010,175
| 0
| 0
|
Apache-2.0
| 2021-08-29T10:13:58
| 2021-08-29T10:13:58
| null |
UTF-8
|
Python
| false
| false
| 4,297
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class SaleOrder(models.Model):
    """Website-shop extension of sale.order: delivery amount + carrier logic."""
    _inherit = 'sale.order'
    # Total delivery cost shown in the eCommerce cart (tax handling depends
    # on the user's "show subtotals" group, see _compute_amount_delivery).
    amount_delivery = fields.Monetary(
        compute='_compute_amount_delivery',
        string='Delivery Amount',
        help="The amount without tax.", store=True, tracking=True)
    def _compute_website_order_line(self):
        # Delivery lines are priced separately; hide them from the cart lines.
        super(SaleOrder, self)._compute_website_order_line()
        for order in self:
            order.website_order_line = order.website_order_line.filtered(lambda l: not l.is_delivery)
    @api.depends('order_line.price_unit', 'order_line.tax_id', 'order_line.discount', 'order_line.product_uom_qty')
    def _compute_amount_delivery(self):
        # Sum only the delivery lines; tax-excluded vs tax-included follows
        # the current user's display preference group.
        for order in self:
            if self.env.user.has_group('account.group_show_line_subtotals_tax_excluded'):
                order.amount_delivery = sum(order.order_line.filtered('is_delivery').mapped('price_subtotal'))
            else:
                order.amount_delivery = sum(order.order_line.filtered('is_delivery').mapped('price_total'))
    def _check_carrier_quotation(self, force_carrier_id=None):
        """Pick a valid carrier for this order and (re)create its delivery line.

        Returns True when a carrier could be set, False otherwise. Service-only
        orders get no carrier at all.
        """
        self.ensure_one()
        DeliveryCarrier = self.env['delivery.carrier']
        if self.only_services:
            self.write({'carrier_id': None})
            self._remove_delivery_line()
            return True
        else:
            self = self.with_company(self.company_id)
            # attempt to use partner's preferred carrier
            if not force_carrier_id and self.partner_shipping_id.property_delivery_carrier_id:
                force_carrier_id = self.partner_shipping_id.property_delivery_carrier_id.id
            carrier = force_carrier_id and DeliveryCarrier.browse(force_carrier_id) or self.carrier_id
            available_carriers = self._get_delivery_methods()
            if carrier:
                if carrier not in available_carriers:
                    carrier = DeliveryCarrier
                else:
                    # set the forced carrier at the beginning of the list to be verfied first below
                    available_carriers -= carrier
                    available_carriers = carrier + available_carriers
            if force_carrier_id or not carrier or carrier not in available_carriers:
                # fall back to the first carrier whose address check passes
                for delivery in available_carriers:
                    verified_carrier = delivery._match_address(self.partner_shipping_id)
                    if verified_carrier:
                        carrier = delivery
                        break
                self.write({'carrier_id': carrier.id})
            self._remove_delivery_line()
            if carrier:
                res = carrier.rate_shipment(self)
                if res.get('success'):
                    self.set_delivery_line(carrier, res['price'])
                    self.delivery_rating_success = True
                    self.delivery_message = res['warning_message']
                else:
                    # keep the carrier but flag the failed rating to the shopper
                    self.set_delivery_line(carrier, 0.0)
                    self.delivery_rating_success = False
                    self.delivery_message = res['error_message']
        return bool(carrier)
    def _get_delivery_methods(self):
        """Return published carriers that can ship to the order's address."""
        address = self.partner_shipping_id
        # searching on website_published will also search for available website (_search method on computed field)
        return self.env['delivery.carrier'].sudo().search([('website_published', '=', True)]).available_carriers(address)
    def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
        """ Override to update carrier quotation if quantity changed """
        self._remove_delivery_line()
        # When you update a cart, it is not enouf to remove the "delivery cost" line
        # The carrier might also be invalid, eg: if you bought things that are too heavy
        # -> this may cause a bug if you go to the checkout screen, choose a carrier,
        # then update your cart (the cart becomes uneditable)
        self.write({'carrier_id': False})
        values = super(SaleOrder, self)._cart_update(product_id, line_id, add_qty, set_qty, **kwargs)
        return values
|
[
"36736117+SHIVJITH@users.noreply.github.com"
] |
36736117+SHIVJITH@users.noreply.github.com
|
2f7faf8cb1215010b6f4f201ca1e2084f22cdd66
|
09de981a1b1591f85e41147a299885e60f7cea93
|
/ivona_api/__init__.py
|
12d2e343b511d0b5f83847e836f1f82e3f4ebef5
|
[
"MIT",
"Python-2.0"
] |
permissive
|
Pythonity/python-ivona-api
|
c95951509f7dda90812c2d325beb82ad34560d1f
|
490a2e502d4aa769b9f41603eb5d5e5ebf1ea912
|
refs/heads/master
| 2020-12-18T16:00:19.157330
| 2017-06-15T21:02:07
| 2017-06-15T21:02:07
| 60,565,253
| 10
| 3
| null | 2017-01-20T23:15:10
| 2016-06-06T22:33:22
|
Python
|
UTF-8
|
Python
| false
| false
| 416
|
py
|
# -*- coding: utf-8 -*-
"""
ivona_api - Python library that helps you connect to Amazon's IVONA Speech
Cloud from within your Python project.
"""
from __future__ import absolute_import, unicode_literals
from ivona_api.ivona_api import IvonaAPI # noqa
# Package metadata (consumed by packaging/docs tooling).
__title__ = 'ivona_api'
__version__ = '0.3.0'
__author__ = 'Pythonity'
__license__ = 'MIT'
__url__ = 'https://github.com/Pythonity/python-ivona-api'
|
[
"pawel.ad@gmail.com"
] |
pawel.ad@gmail.com
|
f9ca580abef29cccb94846ae9d25defc6a9972cc
|
a8828f1b8d443e44be8521d63b212e33c6bd7cbb
|
/app/sprites/tile_map.py
|
16b23e9fc61336084860a295778fd5c873d0a668
|
[] |
no_license
|
JordanSlaman/pygame-cc
|
ab8e156ee7b27296996efc212ab23a18de122325
|
fe2ac5ceb2c8f2cdb37dd1efa657723b4a45d1b9
|
refs/heads/main
| 2023-03-13T05:49:21.705879
| 2021-03-07T01:22:39
| 2021-03-07T01:22:39
| 345,232,003
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,538
|
py
|
from pathlib import Path
# Sprite-sheet lookup table for the tile graphics in sprites/tiles.png.
# Each entry in `tiles` maps a tile name to the (x, y) pixel offset of its
# top-left corner inside the sheet; tiles are tile_size x tile_size pixels.
tile_sprite_filepath = Path('./sprites/tiles.png')
tile_size = 34
# Pixel x-offsets of the 8 sprite columns and y-offsets of the 16 rows.
column = [0, 34, 68, 102, 136, 170, 204, 238]
row = [14, 48, 82, 116, 150, 184, 218, 252, 286, 320, 354, 388, 422, 456, 490, 524]
tiles = {
    "player_down": (column[0], row[0]),
    "player_left": (column[1], row[0]),
    "player_up": (column[2], row[0]),
    "player_right": (column[3], row[0]),
    "player_down_masked": (column[4], row[0]),
    "player_left_masked": (column[5], row[0]),
    "player_up_masked": (column[6], row[0]),
    "player_right_masked": (column[7], row[0]),
    "player_down_water": (column[0], row[1]),
    "player_left_water": (column[1], row[1]),
    "player_up_water": (column[2], row[1]),
    "player_right_water": (column[3], row[1]),
    "player_drowned": (column[4], row[1]),
    "player_burned": (column[5], row[1]),
    # "unused": (column[6], row[1]),
    # "unused": (column[7], row[1]),
    "key_red": (column[0], row[2]),
    "key_blue": (column[1], row[2]),
    "key_yellow": (column[2], row[2]),
    "key_green": (column[3], row[2]),
    "key_red_masked": (column[4], row[2]),
    "key_blue_masked": (column[5], row[2]),
    "key_green_masked": (column[6], row[2]),
    "key_yellow_masked": (column[7], row[2]),
    "boots_skate": (column[0], row[3]),
    "boots_suction": (column[1], row[3]),
    "boots_fireproof": (column[2], row[3]),
    "boots_flipper": (column[3], row[3]),
    "boots_skate_masked": (column[4], row[3]),
    "boots_suction_masked": (column[5], row[3]),
    "boots_fireproof_masked": (column[6], row[3]),
    "boots_flipper_masked": (column[7], row[3]),
    "bug_down": (column[0], row[4]),
    "bug_left": (column[1], row[4]),
    "bug_up": (column[2], row[4]),
    "bug_right": (column[3], row[4]),
    "bug_down_masked": (column[4], row[4]),
    "bug_left_masked": (column[5], row[4]),
    "bug_up_masked": (column[6], row[4]),
    "bug_right_masked": (column[7], row[4]),
    "tank_down": (column[0], row[5]),
    "tank_left": (column[1], row[5]),
    "tank_up": (column[2], row[5]),
    "tank_right": (column[3], row[5]),
    "tank_down_masked": (column[4], row[5]),
    "tank_left_masked": (column[5], row[5]),
    "tank_up_masked": (column[6], row[5]),
    "tank_right_masked": (column[7], row[5]),
    # ball row
    "ship_down": (column[0], row[7]),
    "ship_left": (column[1], row[7]),
    "ship_up": (column[2], row[7]),
    "ship_right": (column[3], row[7]),
    "ship_down_masked": (column[4], row[7]),
    "ship_left_masked": (column[5], row[7]),
    "ship_up_masked": (column[6], row[7]),
    "ship_right_masked": (column[7], row[7]),
    "alien_down": (column[0], row[8]),
    "alien_left": (column[1], row[8]),
    "alien_up": (column[2], row[8]),
    "alien_right": (column[3], row[8]),
    "alien_down_masked": (column[4], row[8]),
    "alien_left_masked": (column[5], row[8]),
    "alien_up_masked": (column[6], row[8]),
    "alien_right_masked": (column[7], row[8]),
    # amoeba row
    "tile": (column[0], row[10]),
    "chip": (column[1], row[10]),
    "chip_gate": (column[2], row[10]),
    # corner wall things
    "button_red": (column[0], row[11]),
    "button_blue": (column[1], row[11]),
    "button_green": (column[2], row[11]),
    "button_grey": (column[3], row[11]),
    "info": (column[4], row[11]),
    # emitters? Big buttons?
    "wall": (column[0], row[12]),
    # "wall_blue": (column[1], row[12]),
    # "wall_pretty": (column[3], row[12]),
    "door_red": (column[4], row[12]),
    "door_blue": (column[5], row[12]),
    "door_yellow": (column[6], row[12]),
    "door_green": (column[7], row[12]),
    "water": (column[0], row[13]),
    "ice": (column[1], row[13]),
    "box_pushable": (column[2], row[12]),
    "box_submerged": (column[2], row[13]),
    # "static": (column[3], row[13]),
    "ice_top_left": (column[4], row[13]),
    "ice_top_right": (column[5], row[13]),
    "ice_bottom_right": (column[6], row[13]),
    "ice_bottom_left": (column[7], row[13]),
    "slide_down": (column[0], row[14]),
    "slide_left": (column[1], row[14]),
    "slide_up": (column[2], row[14]),
    "slide_right": (column[3], row[14]),
    "slide_spiral": (column[4], row[14]),
    "fire": (column[5], row[14]),
    # "bomb": (column[6], row[14]),
    # "theif": (column[7], row[14]),
    # "magic_wall": (column[0], row[15]),
    # "magic_tile": (column[1], row[15]),
    "exit": (column[2], row[15]),
    # "unused": (column[3], row[15]),
    # "unused": (column[4], row[15]),
    # "blue": (column[7], row[15]),
}
|
[
"jordan.slaman@gmail.com"
] |
jordan.slaman@gmail.com
|
73873758c4fae5b30bbd029eb70045763949697f
|
bc758176773f988876d408880b2288de0a1a1532
|
/manage.py
|
2786e300b207f85a923c4a6d888b9f6294bb6853
|
[] |
no_license
|
15101538237ren/traffic_prediction
|
171e8ed66d5d0462e18d635e4380cbac566c5123
|
5e463a098cd4f6439cd5fcf6d7ae6bb36a6536de
|
refs/heads/master
| 2021-03-19T16:35:13.074763
| 2018-04-11T03:31:01
| 2018-04-11T03:31:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
#!/usr/bin/env python
# Django management entry point (generated boilerplate).
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings, then hand argv to the
    # management-command dispatcher (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "traffic_prediction.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"renhongleiz@126.com"
] |
renhongleiz@126.com
|
fd33cdef41235e78f40026197c9486d4bb1b798e
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/PythiaChargedResonance_WG/PythiaChargedResonance_WGToLNuG_M2600_width5_13TeV-pythia8_cff.py
|
83509f85ddff909d3050b28a383fc39e126a530a
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# CMSSW generator fragment: Pythia8 charged-Higgs (PDG id 37) production at
# 13 TeV, forced to decay to W(->l nu) + photon with m0 = 2600 GeV and a
# fixed 130 GeV width (5% of the mass, matching the filename's "width5").
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            "37:onMode = off",
            "37:addChannel = 1 0.00001 101 24 22",
            "37:onIfMatch = 24 22",
            "37:m0 = 2600",
            "37:doForceWidth = on",
            "37:mWidth = 130.000000",
            "24:onMode = off",
            "24:onIfAny = 11 13 15",
            "Higgs:useBSM = on",
            "HiggsBSM:ffbar2H+- = on"),
        parameterSets = cms.vstring(
            "pythia8CommonSettings",
            "pythia8CUEP8M1Settings",
            "processParameters")
    )
)
|
[
"shubhanshu.chauhan.cern.ch"
] |
shubhanshu.chauhan.cern.ch
|
19ff523329804f16e2b6d3094c39cb90e0bf0d56
|
9ed4d46aedd4d4acadb48d610e940594b5b7b3fd
|
/electronics/resistor_equivalence.py
|
55e7f2d6b5d2bd87baae998228830e13073f1456
|
[
"MIT"
] |
permissive
|
TheAlgorithms/Python
|
7596a0e236ed12a61f9db19a7ea68309779cc85b
|
421ace81edb0d9af3a173f4ca7e66cc900078c1d
|
refs/heads/master
| 2023-09-01T17:32:20.190949
| 2023-08-29T13:18:10
| 2023-08-29T13:18:10
| 63,476,337
| 184,217
| 48,615
|
MIT
| 2023-09-14T02:05:29
| 2016-07-16T09:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
# https://byjus.com/equivalent-resistance-formula/
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1/ (1/R1 + 1/R2 + ... + 1/Rn)
    >>> resistor_parallel([3.21389, 2, 3])
    0.8737571620498019
    >>> resistor_parallel([3.21389, 2, -3])
    Traceback (most recent call last):
        ...
    ValueError: Resistor at index 2 has a negative or zero value!
    >>> resistor_parallel([3.21389, 2, 0.000])
    Traceback (most recent call last):
        ...
    ValueError: Resistor at index 2 has a negative or zero value!
    """
    reciprocal_sum = 0.00
    for index, resistor in enumerate(resistors):
        # Zero or negative resistances are physically invalid here.
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        reciprocal_sum += 1 / float(resistor)
    return 1 / reciprocal_sum
def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    Calculate the equivalent resistance for any number of resistors in parallel.
    >>> resistor_series([3.21389, 2, 3])
    8.21389
    >>> resistor_series([3.21389, 2, -3])
    Traceback (most recent call last):
        ...
    ValueError: Resistor at index 2 has a negative value!
    """
    total = 0.00
    for index, resistor in enumerate(resistors):
        # Negative resistances are rejected; zero is allowed in series.
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        total += resistor
    return total
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"noreply@github.com"
] |
TheAlgorithms.noreply@github.com
|
b8f2f746520ef84dcec1b5ea9dbfb14196e27a81
|
37efda4646f478b66674e384e1bc139e7874d972
|
/practice/String.py
|
ee6e9b1a4544ded7ff46560e1f056471c96e2855
|
[] |
no_license
|
siberian122/kyoupuro
|
02c1c40f7c09ff0c07a1d50b727f860ad269d8b1
|
8bf5e5b354d82f44f54c80f1fc014c9519de3ca4
|
refs/heads/master
| 2023-04-04T02:45:29.445107
| 2021-04-20T07:37:47
| 2021-04-20T07:37:47
| 299,248,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
# Read two strings and report whether t is a rotation of s:
# rotate s one character at a time and compare against t.
s = input()
t = input()
matched = False
for _ in range(len(s)):
    s = s[-1] + s[:-1]  # rotate right by one character
    if s == t:
        matched = True
print('Yes' if matched else 'No')
|
[
"siberian1000@gmail.com"
] |
siberian1000@gmail.com
|
a44ec4ba2cddc599611798c656eaece5002be537
|
4da66858f5278af1b1c9e1d0099a9e422400dcfa
|
/python_spider/test_spider/test_spider.py
|
f00b98fe900c4783aa078041a49e1ee89314edce
|
[] |
no_license
|
hi-noikiy/TestProject
|
62eb719a113b15be98a099e099afbb20f99d7062
|
e37617c9f1bee058c196f776451c1d81d9fd5f86
|
refs/heads/master
| 2022-01-05T03:00:05.498931
| 2018-09-26T09:28:10
| 2018-09-26T09:28:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
# Scratch script: experiment with lxml XPath queries against a local file.
import requests  # NOTE(review): imported but unused in this snippet
from lxml import etree
# Parse a local HTML file (must exist next to this script).
html = etree.parse('hello.html')
print(type(html))
result = html.xpath('//li')  # every <li> element in the document
print(result)
print(len(result))
print(type(result))
print(type(result[0]))
result1 = html.xpath('//li/@class')  # class attribute values of all <li>
print(result1)
# res = etree.tostring(html)
# print(res)
|
[
"401491197@qq.com"
] |
401491197@qq.com
|
6ca0aacda5d9852d4c3a3174f5af3c2e3a61a37f
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5686275109552128_0/Python/SozoS/B.py
|
c5247b1e1c0f2a3ec047c25e3d0c6464b0df70d0
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
# Author: Davin Choo
# Email: cxjdavin@gmail.com
# Observations:
# Doing all the splits before eating cannot make the solution worse
# Always splitting to an empty plate cannot make the solution worse
# So, we always split first, and split to empty plate
# (Exploit fact that we have infinite customers)
# Approach: Brute force search
# Set a maximum height to allow customers to eat at
# Eat for the lowest stack height given that constraint
T = int(raw_input())
for i in xrange(0, T):
D = int(raw_input())
temp = raw_input().split()
sizes = {}
largest = 0
for j in xrange(0, D):
pj = int(temp[j])
largest = max(largest, pj)
if pj not in sizes:
sizes[pj] = 0
sizes[pj] += 1
# Sort pancake stack height in descending order
descending = sorted(sizes, reverse = True)
# Brute force check all possibilities
best_time = largest
best_height = largest
for h in xrange(largest-1, 1, -1):
current_time = 0
# Split
for d in descending:
if d <= h:
break
else: # d > h
if d % h == 0:
current_time += sizes[d] * (d/h-1)
else:
current_time += sizes[d] * d/h
# Eat
current_time += h
# Update
if current_time < best_time:
best_time = current_time
best_height = h
# print best_height
print "Case #" + str(i+1) + ": " + str(best_time)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
f865d11508ed722439228027809b46b8c7a88b52
|
a25b18e3fd20e0b14f4c852a956b7471c0fb11c9
|
/7_14_longest_public_prefix.py
|
411f468a7c368a845760dd4a1bcf2458766d644a
|
[] |
no_license
|
Kiriyamaku/learning_algorithm
|
efd8bc18b9cce978e30dfd4cd2e48d451a3e8429
|
6a1886477c0cccf24b077d175804866f2c76028a
|
refs/heads/master
| 2022-12-11T11:13:44.736019
| 2020-08-22T03:36:49
| 2020-08-22T03:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
class Solution(object):
    def longestCommonPrefix(self, strs):
        """
        Return the longest common prefix shared by all strings in strs.

        :type strs: List[str]
        :rtype: str

        Fixes the original, which broke out of its loops unconditionally,
        only printed debug output, and never returned the declared str.
        """
        if not strs:
            return ""
        prefix = strs[0]
        for word in strs[1:]:
            # Shrink the candidate until it is a prefix of this word.
            while not word.startswith(prefix):
                prefix = prefix[:-1]
                if not prefix:
                    return ""
        return prefix
if __name__ == "__main__":
a=Solution()
test_case=["flower","flow","flight"]
a.longestCommonPrefix(test_case)
|
[
"mac@macdeMacBook-Pro.local"
] |
mac@macdeMacBook-Pro.local
|
6892bb43785b7888d4248352098f68fab19ad9bd
|
3879d1ca43c573c209f962182cd1e7f7fe978fbf
|
/leetcode/2011. Final Value of Variable After Performing Operations/2011.py
|
0af1a0518557b593bab1bfbcfcc10daf5e12eebf
|
[] |
no_license
|
DoctorLai/ACM
|
34a5600a5adf22660c5d81b2d8b7a358be537ecf
|
aefa170f74c55c1230eb6f352770512b1e3f469e
|
refs/heads/master
| 2023-09-01T02:13:01.604508
| 2023-08-31T15:42:07
| 2023-08-31T15:42:07
| 146,173,024
| 62
| 18
| null | 2020-10-11T13:19:57
| 2018-08-26T11:00:36
|
C++
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
# https://helloacm.com/teaching-kids-programming-final-value-of-variable-after-performing-operations-via-reduce-function/
# https://leetcode.com/problems/final-value-of-variable-after-performing-operations/
# EASY, LAMBDA
class Solution:
    def finalValueAfterOperations(self, operations: List[str]) -> int:
        """Return X after applying all ++/-- operations, starting from X = 0."""
        value = 0
        for op in operations:
            # op is one of '++X', 'X++', '--X', 'X--'; the middle character
            # always carries the sign.
            value = value + 1 if op[1] == '+' else value - 1
        return value
|
[
"noreply@github.com"
] |
DoctorLai.noreply@github.com
|
d9ff25c429389600a7664368ae7dfe218df68cf7
|
bc02e2c69f425e03b609f466b0a2d52a455765dc
|
/0524/정수 삼각형_김현성.py
|
aecf9418b3d76d01db1b968653c3924d5a3df0dd
|
[] |
no_license
|
gaberani/AlgorithmStudy
|
d795f449fe185c3993df90173f27b7eb74e02366
|
6d9d20ac29446d22f2e0ef7037f131c4a2f48762
|
refs/heads/master
| 2023-02-03T03:24:45.039238
| 2020-12-22T12:53:52
| 2020-12-22T12:53:52
| 287,101,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# BOJ 1932 "integer triangle": top-down DP where each cell accumulates the
# best path sum reachable from the apex; the answer is the best value in
# the last row.
n = int(input())
triangle = [list(map(int, input().split())) for _ in range(n)]
for r in range(1, n):
    for c in range(r + 1):
        if c == 0:
            # left edge: only parent is directly above
            best_parent = triangle[r - 1][0]
        elif c == r:
            # right edge: only parent is up-left
            best_parent = triangle[r - 1][c - 1]
        else:
            best_parent = max(triangle[r - 1][c - 1], triangle[r - 1][c])
        triangle[r][c] += best_parent
print(max(triangle[n - 1]))
|
[
"khs0783@naver.com"
] |
khs0783@naver.com
|
4d73d8011aa6a57fb0f1a14eb63edbfd1dc93fdd
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/5WvxKLK55JvT8NLfR_12.py
|
93e7a18be50b5ea02c4e61e23f3ac3de275208a6
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,833
|
py
|
"""
A square matrix (same number of rows as columns) is called _row diagonally
dominant_ if "the absolute value of each entry in the main diagonal is
strictly larger than the sum of the absolute values of the other entries in
that row".
To illustrate ...
[
[10, 3, 6],
[2, -9, -6],
[1, -1, 4]
]
The absolute values from top left to bottom right are:
* `10` = First item of first row.
* `9` = Second item of second row.
* `4` = Third item of third row.
... making a _row diagonal dominant_ total of 23.
**In the first row ...**
* The value of the _row diagonal dominant_ is `10`.
* The sum of the other absolute values are `3` and `6` make a total of `9`.
... so far, the matrix is _row diagonally dominant_ , since `10 > 9`.
**In the second row ...**
* The value of the _row diagonal dominant_ is `9`.
* The sum of the other absolute values in the second row are `3` and `6` which make a total of `9`.
... meaning the matrix is not _row diagonally dominant_ since `9 <= 9`.
[
[10, 3, 6],
[3, -9, -6],
[1, -1, 4]
]
For a square to be _row diagonally dominant_ , all of the rows in the square
have to be like Row 1.
Write a function that determines if a given square matrix is row diagonally
dominant.
### Examples
diag_dom([
[2, -1],
[-1, 2]
]) ➞ True
diag_dom([
[0, 1],
[1, 0]
]) ➞ False
diag_dom([
[10, 3, 6],
[2, -9, -6],
[1, -1, 4]
]) ➞ True
diag_dom([
[10, 3, 6],
[4, -9, -6],
[1, -1, 4]
]) ➞ False
### Notes
As in the examples, the size of the matrices will change, but they will always
be square.
"""
def diag_dom(arr):
    """Return True if the square matrix `arr` is row diagonally dominant.

    A matrix is row diagonally dominant when, for every row i, the
    absolute value of the diagonal entry arr[i][i] is strictly greater
    than the sum of the absolute values of the other entries in row i.
    """
    # sum(|row|) < 2*|diag|  <=>  sum(|row|) - |diag| < |diag|,
    # i.e. the off-diagonal absolute sum is strictly below the diagonal.
    return all(
        sum(abs(x) for x in row) < 2 * abs(row[i])
        for i, row in enumerate(arr)
    )
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0427134f48809a05a4591c82d7d171b9583a9c2a
|
dd72348fc25e5a844e18f4fd6eff326a143f5852
|
/Chapter 02/nn_architecture/mnist.py
|
c7fbf893f7d04056d47b9044f7316181168b8617
|
[
"MIT"
] |
permissive
|
bpbpublications/Time-Series-Forecasting-using-Deep-Learning
|
3d76a74b0106decefa8a6fb27e3379211416d928
|
fd84553d33e912edb4a1400af0f9374e72747457
|
refs/heads/main
| 2023-09-02T23:42:19.707131
| 2021-10-19T16:45:28
| 2021-10-19T16:45:28
| 409,091,964
| 20
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
import torch.nn as nn
import torch.nn.functional as F
class MnistModel(nn.Module):
    """Small LeNet-style CNN for 28x28 MNIST digit classification.

    Input:  (N, 1, 28, 28) float tensor.
    Output: (N, 10) log-probabilities (log-softmax over the class dim).
    """

    def __init__(self):
        super(MnistModel, self).__init__()
        # 28x28 -> conv(5x5) -> 24x24 -> maxpool(2) -> 12x12
        self.conv1 = nn.Conv2d(1, 10, kernel_size = 5)
        # 12x12 -> conv(5x5) -> 8x8 -> maxpool(2) -> 4x4; 20*4*4 = 320 feats
        self.conv2 = nn.Conv2d(10, 20, kernel_size = 5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        # Flatten conv features to (N, 320) for the classifier head.
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training = self.training)
        x = self.fc2(x)
        # Fix: pass dim explicitly -- calling log_softmax without `dim`
        # is deprecated and relies on a rank-based heuristic (which picks
        # dim=1 for this 2-D input, so behavior is unchanged).
        return F.log_softmax(x, dim=1)
|
[
"41231825+bpbpublications@users.noreply.github.com"
] |
41231825+bpbpublications@users.noreply.github.com
|
ad11f693f8998e904ba57a59f2b2a3e5b9cbdab4
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/InteractionOps/operators/iops.py
|
414d2054a0fa5bc9c908cc9342fe8a541f7004ff
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,756
|
py
|
import bpy
class IOPS(bpy.types.Operator):
    """Universal mode-switch operator.

    Dispatches on the active object's type: toggles between Object mode
    and the matching edit-like mode, and switches component selection
    modes (vert/edge/face in the 3D view; vertex/edge/face/island in the
    UV editor).
    """
    bl_idname = "iops.main"
    bl_label = "IOPS"
    bl_options = {"REGISTER", "UNDO"}
    # Index -> mode-identifier lookup tables for each editor/object type.
    modes_3d = {0: "VERT", 1: "EDGE", 2: "FACE"}
    modes_uv = {0: "VERTEX", 1: "EDGE", 2: "FACE", 3: "ISLAND"}
    modes_gpen = {0: "EDIT_GPENCIL", 1: "PAINT_GPENCIL", 2: "SCULPT_GPENCIL"}
    modes_curve = {0: "EDIT_CURVE"}
    modes_text = {0: "EDIT_TEXT"}
    modes_meta = {0: "EDIT_META"}
    modes_lattice = {0: "EDIT_LATTICE"}
    modes_armature = {0: "EDIT", 1: "POSE"}
    # NOTE(review): text objects report type "FONT" (see execute below),
    # so the "TEXT" entry here never matches and "FONT" is absent -- verify.
    supported_types = {"MESH", "CURVE", "GPENCIL", "EMPTY", "TEXT", "META", "ARMATURE", "LATTICE"}
    # Current mode
    # Target-mode class attributes; presumably overridden per keymap or
    # subclass before execute() runs -- as written they are empty strings.
    _mode_3d = ""
    _mode_uv = ""
    _mode_gpen = ""
    _mode_curve = ""
    _mode_text = ""
    _mode_meta = ""
    _mode_armature = ""
    _mode_lattice = ""

    @classmethod
    def poll(cls, context):
        """Operator is available only when there is an active object."""
        return context.object is not None

    def get_mode_3d(self, tool_mesh):
        """Translate the mesh_select_mode boolean triple into a
        select_mode identifier ("VERT"/"EDGE"/"FACE"; "" if none set)."""
        mode = ""
        if tool_mesh[0]:
            mode = "VERT"
        elif tool_mesh[1]:
            mode = "EDGE"
        elif tool_mesh[2]:
            mode = "FACE"
        return mode

    def execute(self, context):
        """Toggle modes based on the active object's type and current area."""
        # Object <-> Mesh
        scene = bpy.context.scene
        tool = bpy.context.tool_settings
        tool_mesh = scene.tool_settings.mesh_select_mode
        active_object = bpy.context.view_layer.objects.active
        if active_object.type == "MESH":
            # Local shadows the class attribute; holds the CURRENT mode.
            _mode_3d = self.get_mode_3d(tool_mesh)
            if (bpy.context.area.type == "VIEW_3D" or
                    (bpy.context.area.type == "IMAGE_EDITOR" and
                     tool.use_uv_select_sync is True)):
                # Same modes for active sync in UV
                # Go to Edit Mode
                if bpy.context.mode == "OBJECT":
                    bpy.ops.object.mode_set(mode="EDIT")
                    bpy.ops.mesh.select_mode(type=self._mode_3d)
                    _mode_3d = self._mode_3d
                    # self.report({"INFO"}, _mode_3d)
                    return{"FINISHED"}
                # Switch selection modes
                # If activated same selection mode again switch to Object Mode
                if (bpy.context.mode == "EDIT_MESH" and self._mode_3d != _mode_3d):
                    bpy.ops.mesh.select_mode(type=self._mode_3d)
                    _mode_3d = self._mode_3d
                    # self.report({"INFO"}, _mode_3d)
                    return{"FINISHED"}
                else:
                    bpy.ops.object.mode_set(mode="OBJECT")
                    # self.report({"INFO"}, "OBJECT MODE")
                    return{"FINISHED"}
            # UV <-> Mesh
            if bpy.context.area.type == "IMAGE_EDITOR":
                # Go to Edit Mode and Select All
                bpy.ops.object.mode_set(mode="EDIT")
                bpy.ops.mesh.select_all(action="SELECT")
                tool.uv_select_mode = self._mode_uv
                _mode_uv = self._mode_uv
                # self.report({"INFO"}, _mode_uv)
                return{"FINISHED"}
            elif self._mode_uv != _mode_uv:
                # NOTE(review): _mode_uv is unbound on this path (only the
                # branch above assigns it) -- looks like a latent NameError;
                # confirm against the upstream add-on source.
                tool.uv_select_mode = self._mode_uv
                _mode_uv = self._mode_uv
                # self.report({"INFO"}, _mode_uv)
                return{"FINISHED"}
            else:
                bpy.ops.object.mode_set(mode="OBJECT")
                # self.report({"INFO"}, "OBJECT MODE")
                return{"FINISHED"}
        # Object <-> Curve
        if active_object.type == "CURVE":
            _mode_curve = "EDIT" if bpy.context.mode != "EDIT_CURVE" else "OBJECT"
            bpy.ops.object.mode_set(mode=_mode_curve)
            # self.report({"INFO"}, _mode_curve)
            return{"FINISHED"}
        # Object <-> GPencil
        if active_object.type == "GPENCIL":
            _mode_gpen = active_object.mode
            if (bpy.context.area.type == "VIEW_3D"):
                if bpy.context.mode == "OBJECT":
                    _mode_gpen = self._mode_gpen
                    bpy.ops.object.mode_set(mode=_mode_gpen)
                    # self.report({"INFO"}, _mode_gpen)
                    return{"FINISHED"}
                elif self._mode_gpen != _mode_gpen:
                    bpy.ops.object.mode_set(mode=self._mode_gpen)
                    _mode_gpen = self._mode_gpen
                    # self.report({"INFO"}, _mode_gpen)
                    return{"FINISHED"}
                else:
                    bpy.ops.object.mode_set(mode="OBJECT")
                    # self.report({"INFO"}, "OBJECT MODE")
                    return{"FINISHED"}
            return{"FINISHED"}
        # Object <-> Text
        if active_object.type == "FONT":
            _mode_text = "EDIT" if bpy.context.mode != "EDIT_TEXT" else "OBJECT"
            bpy.ops.object.mode_set(mode=_mode_text)
            # self.report({"INFO"}, _mode_text)
            return{"FINISHED"}
        # Object <-> Meta
        if active_object.type == "META":
            _mode_meta = "EDIT" if bpy.context.mode != "EDIT_META" else "OBJECT"
            # bpy.ops.object.mode_set(mode=_mode_meta)
            bpy.ops.object.editmode_toggle()
            # self.report({"INFO"}, _mode_meta)
            return{"FINISHED"}
        # Object <-> Armature
        if active_object.type == "ARMATURE":
            _mode_armature = active_object.mode
            if (bpy.context.area.type == "VIEW_3D"):
                if bpy.context.mode == "OBJECT":
                    _mode_armature = self._mode_armature
                    bpy.ops.object.mode_set(mode=_mode_armature)
                    # self.report({"INFO"}, _mode_armature)
                    return{"FINISHED"}
                elif self._mode_armature != _mode_armature:
                    bpy.ops.object.mode_set(mode=self._mode_armature)
                    _mode_armature = self._mode_armature
                    # self.report({"INFO"}, _mode_armature)
                    return{"FINISHED"}
                else:
                    bpy.ops.object.mode_set(mode="OBJECT")
                    # self.report({"INFO"}, "OBJECT MODE")
                    return{"FINISHED"}
            return{"FINISHED"}
        # Object <-> Lattice
        if active_object.type == "LATTICE":
            _mode_lattice = "EDIT" if bpy.context.mode != "EDIT_LATTICE" else "OBJECT"
            bpy.ops.object.mode_set(mode=_mode_lattice)
            # self.report({"INFO"}, _mode_lattice)
            return{"FINISHED"}
        # Unsupported Types
        if active_object.type not in self.supported_types:
            self.report({"INFO"}, "Object type " + str(active_object.type) + " not supported by iOps!")
            return{"FINISHED"}
        return{"FINISHED"}
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
7ff23331c1eb0d2eebc99527903657b00726812c
|
496a63f41fa32e2bb3ecce0d35ff4374f1c02ad5
|
/src/scripting/parser/combinator/phrase.py
|
5ea68e0c555f1e424e15a43ff0aa9d39dd4eee26
|
[
"BSD-3-Clause"
] |
permissive
|
vincent-lg/avenew.one
|
bbfa8d44e68db943b8825e9d4a32a43e985778fe
|
fb7f98d331e47e2032ee1e51bf3e4b2592807fdf
|
refs/heads/main
| 2023-02-14T00:28:53.511552
| 2021-01-13T11:13:07
| 2021-01-13T11:13:07
| 330,207,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,428
|
py
|
# Copyright (c) 2020-20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Phrase combinator."""
from scripting.exceptions import ParseError
from scripting.parser.parser import Parser
class Phrase(Parser):
    """
    Parser for a phrase.

    A phrase wraps a single sub-parser and adds exactly one extra
    requirement: once the sub-parser has finished, no unconsumed tokens
    may remain. Leftover tokens are reported as a parse error.
    """

    def __init__(self, parser):
        self.parser = parser

    async def process(self, tokens):
        """Run the wrapped parser, then require the stream to be exhausted."""
        outcome = await self.parser.process(tokens)
        if not tokens.empty(check_cursor=True):
            # Trailing tokens mean the input was not fully parsed.
            tokens.parse_error("incomplete input")
        else:
            return outcome

    def repr(self, seen=None):
        """Return the parser's representation as a string."""
        visited = seen or []
        visited.append(self)
        return "phrase" + self.parser.repr(seen=visited)
|
[
"vincent.legoff.srs@gmail.com"
] |
vincent.legoff.srs@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.