blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0921d86405956f11e9c99f975f6602dfe3062d4
|
498e99bae2b0a107a4f1c8563a74470e8516f465
|
/apps/project/forms.py
|
4a69db81801e8540a92517735406212b5511a46e
|
[
"MIT"
] |
permissive
|
xiaoxiaolulu/MagicTestPlatform
|
91bcf9125c4c7f254bf8aaf425b7c72ca40b7a49
|
dc9b4c55f0b3ace180c30b7f080eb5d88bb38fdb
|
refs/heads/master
| 2022-05-29T00:05:48.030392
| 2020-01-20T09:16:44
| 2020-01-20T09:16:44
| 219,256,372
| 5
| 1
|
MIT
| 2022-05-25T02:24:22
| 2019-11-03T05:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,153
|
py
|
"""
项目管理模块表单验证器
~~~~~~~~~~~~~~~~~~~~~~~~~~~
DESCRIPTION
:copyright: (c) 2019 by Null.
"""
from wtforms import (
StringField,
TextAreaField,
IntegerField
)
from wtforms.validators import DataRequired
from wtforms_tornado import Form
class ProjectForm(Form):
    """Validates the payload for creating or updating a project."""
    name = StringField("项目名称", validators=[DataRequired(message="请输入项目名称")])
    env = IntegerField("测试环境", validators=[DataRequired(message="请选择环境")])
    desc = TextAreaField("项目描述", validators=[DataRequired(message="请输入项目描述")])
class TestEnvironmentForm(Form):
    """Validates the payload for creating or updating a test environment."""
    name = StringField("测试环境名称", validators=[DataRequired(message="请输入测试环境名称")])
    host_address = StringField("测试环境地址", validators=[DataRequired(message="请输入测试环境地址")])
    desc = TextAreaField("测试环境描述", validators=[DataRequired(message="请输入测试环境描述")])
class DBSettingForm(Form):
    """Validates the payload for creating or updating a database connection.

    All fields are mandatory; DataRequired also rejects falsy values such
    as an empty string.
    """
    name = StringField("数据库名称", validators=[DataRequired(message="请输入数据库名称")])
    db_type = StringField("数据库类型", validators=[DataRequired(message="请输入数据库类型")])
    db_user = StringField("数据库账号", validators=[DataRequired(message="请输入数据库账号")])
    db_password = StringField("数据库密码", validators=[DataRequired(message="请输入数据库密码")])
    # Fixed label typo: was "数据库境地址" (stray 境) — the matching error
    # message already says 数据库地址.
    db_host = StringField("数据库地址", validators=[DataRequired(message="请输入数据库地址")])
    db_port = IntegerField("数据库端口号", validators=[DataRequired(message="请输入数据库端口号")])
    desc = TextAreaField("数据库描述", validators=[DataRequired(message="请输入数据库描述")])
class FunctionGeneratorForm(Form):
    """Validates the payload for registering a generator function."""
    name = StringField("函数名称", validators=[DataRequired(message="请输入函数名称")])
    function = StringField("函数方法", validators=[DataRequired(message="请输入方法名称")])
    desc = TextAreaField("方法描述", validators=[DataRequired(message="请输入方法描述")])
class FunctionDebugForm(Form):
    """Validates the payload for debugging a single generator function."""
    function = StringField("函数方法", validators=[DataRequired(message="请输入方法名称")])
|
[
"546464268@qq.com"
] |
546464268@qq.com
|
51762a5493480f20d4c0ec1971b890da62221661
|
10e94d77e56d9cbb979174795c465b679d03d6b3
|
/tensorflow/contrib/session_bundle/example/export_half_plus_two.py
|
a17617db12e50c46eebfbd7fd97418342833e856
|
[
"Apache-2.0"
] |
permissive
|
pint1022/tf-coriander
|
68939732c1ec0f052929c13ef6d8f49e44d423e4
|
197a685accca4a3f38285d6ac3ccf3998a200090
|
refs/heads/master
| 2020-04-14T18:56:40.334257
| 2019-01-11T00:40:11
| 2019-01-11T00:40:11
| 164,038,861
| 1
| 0
|
Apache-2.0
| 2019-01-04T00:53:40
| 2019-01-04T00:53:40
| null |
UTF-8
|
Python
| false
| false
| 5,405
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a toy linear regression inference graph.
Exports a TensorFlow graph to /tmp/half_plus_two/ based on the Exporter
format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise Session
loading and execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
tf.app.flags.DEFINE_string("export_dir", "/tmp/half_plus_two",
"Directory where to export inference model.")
FLAGS = tf.app.flags.FLAGS
def Export():
  """Builds the half-plus-two graph (y = a*x + b) and exports it.

  Constructs the inference graph inside a fresh session, including a
  regression signature, named input/output signatures, and asset-file
  bookkeeping, then writes the export to FLAGS.export_dir via the
  session_bundle Exporter.
  """
  with tf.Session() as sess:
    # Make model parameters a&b variables instead of constants to
    # exercise the variable reloading mechanisms.
    a = tf.Variable(0.5, name="a")
    b = tf.Variable(2.0, name="b")

    # Create a placeholder for serialized tensorflow.Example messages to be fed.
    serialized_tf_example = tf.placeholder(tf.string, name="tf_example")

    # Parse the tensorflow.Example looking for a feature named "x" with a single
    # floating point value.
    feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    # Use tf.identity() to assign name
    x = tf.identity(tf_example["x"], name="x")

    # Calculate, y = a*x + b
    y = tf.add(tf.mul(a, x), b, name="y")

    # Setup a standard Saver for our variables.
    save = tf.train.Saver({"a": a, "b": b}, sharded=True)

    # asset_path contains the base directory of assets used in training (e.g.
    # vocabulary files).
    original_asset_path = tf.constant("/tmp/original/export/assets")
    # Ops reading asset files should reference the asset_path tensor
    # which stores the original asset path at training time and the
    # overridden assets directory at restore time.
    asset_path = tf.Variable(original_asset_path,
                             name="asset_path",
                             trainable=False,
                             collections=[])
    assign_asset_path = asset_path.assign(original_asset_path)

    # Use a fixed global step number.
    global_step_tensor = tf.Variable(123, name="global_step")

    # Create a RegressionSignature for our input and output.
    regression_signature = exporter.regression_signature(
        input_tensor=serialized_tf_example,
        # Use tf.identity here because we export two signatures here.
        # Otherwise only graph for one of the signatures will be loaded
        # (whichever is created first) during serving.
        output_tensor=tf.identity(y))
    named_graph_signature = {
        "inputs": exporter.generic_signature({"x": x}),
        "outputs": exporter.generic_signature({"y": y})
    }

    # Create two filename assets and corresponding tensors.
    # TODO(b/26254158) Consider adding validation of file existance as well as
    # hashes (e.g. sha1) for consistency.
    original_filename1 = tf.constant("hello1.txt")
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
    filename1 = tf.Variable(original_filename1,
                            name="filename1",
                            trainable=False,
                            collections=[])
    assign_filename1 = filename1.assign(original_filename1)
    original_filename2 = tf.constant("hello2.txt")
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)
    filename2 = tf.Variable(original_filename2,
                            name="filename2",
                            trainable=False,
                            collections=[])
    assign_filename2 = filename2.assign(original_filename2)

    # Init op contains a group of all variables that we assign.
    init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)

    # CopyAssets is used as a callback during export to copy files to the
    # given export directory.
    def CopyAssets(filepaths, export_path):
      print("copying asset files to: %s" % export_path)
      for filepath in filepaths:
        print("copying asset file: %s" % filepath)

    # Run an export.
    tf.initialize_all_variables().run()
    export = exporter.Exporter(save)
    export.init(
        sess.graph.as_graph_def(),
        init_op=init_op,
        default_graph_signature=regression_signature,
        named_graph_signatures=named_graph_signature,
        assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
        assets_callback=CopyAssets)
    export.export(FLAGS.export_dir, global_step_tensor, sess)
def main(unused_argv):
  """Entry point invoked by tf.app.run(): build and export the model."""
  Export()
if __name__ == "__main__":
  # tf.app.run() parses the command-line flags (--export_dir) and calls main().
  tf.app.run()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
fd163de569441d3f1a78fed668627ac3739d01cf
|
bc441bb06b8948288f110af63feda4e798f30225
|
/resource_manage_sdk/model/container/ingress_rule_pb2.pyi
|
90c7370882a3274a625add18341209e9ed58abc9
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,236
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from resource_manage_sdk.model.container.http_ingress_path_pb2 import (
HTTPIngressPath as resource_manage_sdk___model___container___http_ingress_path_pb2___HTTPIngressPath,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
# NOTE: generated mypy stub for the container.IngressRule protobuf message —
# regenerate rather than hand-edit the signatures.
class IngressRule(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...

    # Nested message: the HTTP paths served under this rule.
    class Http(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...

        # Repeated composite field of HTTPIngressPath messages (read-only view).
        @property
        def paths(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_manage_sdk___model___container___http_ingress_path_pb2___HTTPIngressPath]: ...

        def __init__(self,
            *,
            paths : typing___Optional[typing___Iterable[resource_manage_sdk___model___container___http_ingress_path_pb2___HTTPIngressPath]] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> IngressRule.Http: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> IngressRule.Http: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"paths",b"paths"]) -> None: ...

    # Scalar string field.
    host = ... # type: typing___Text

    # Singular message field; presence checkable via HasField("http").
    @property
    def http(self) -> IngressRule.Http: ...

    def __init__(self,
        *,
        host : typing___Optional[typing___Text] = None,
        http : typing___Optional[IngressRule.Http] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> IngressRule: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> IngressRule: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"http",b"http"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"host",b"host",u"http",b"http"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
084424cd5ba296c1622253af22231705a68e5b7b
|
1a9a62b3feb53c7f87352587a774eb772948ebc9
|
/service2/application/routes.py
|
405564d539c122c26757d1b65970fc1ed9043f10
|
[] |
no_license
|
vuchenna/SFIAproject2
|
ff1e643cec2947931176af2f77d7b24032d80aed
|
a5d82331636f49d3f1978989eb5428b4a20e57a8
|
refs/heads/master
| 2022-12-24T13:09:05.392030
| 2020-03-09T16:36:39
| 2020-03-09T16:36:39
| 244,886,380
| 0
| 0
| null | 2022-12-08T03:46:07
| 2020-03-04T11:45:54
|
Python
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
from application import app, db
from application.models import Gender
#from application.forms import GenerateForm
import random,requests
@app.route('/')
@app.route('/home')
def home():
    """Render the home page template."""
    # Fix: render_template was referenced without ever being imported in this
    # module, so every request to / or /home raised NameError. Imported
    # locally here to keep the fix self-contained.
    from flask import render_template
    return render_template('home.html', title='Home')
@app.route('/gender', methods=['GET', 'POST'])
def gender():
    """Return a random gender id (0, 1 or 2, inclusive) as a string."""
    # The Gender DB lookup (Gender.query.filter_by(id=...)) was disabled in
    # the original; the route simply echoes the random id.
    gender_id = random.randint(0, 2)
    return str(gender_id)
|
[
"you@example.com"
] |
you@example.com
|
79ecbaa165b4ab251b36e00d45242e63bfd51e85
|
0487c30d3d2a26ee62eb9e82c1b1e6edb7cb8b36
|
/tests/sai_qualify/community_cases.py
|
b21af337103cbad72b54347c09e156056917d5e7
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
gord1306/sonic-mgmt
|
e4047cbcdb600591816215e765c7f30664cc4543
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
refs/heads/master
| 2022-12-17T08:05:58.944208
| 2022-06-06T02:34:48
| 2022-06-06T02:34:48
| 195,778,851
| 1
| 0
|
NOASSERTION
| 2019-07-08T09:21:07
| 2019-07-08T09:21:07
| null |
UTF-8
|
Python
| false
| false
| 5,025
|
py
|
# Community SAI test cases (PTF test identifiers, "<module>.<TestClass>").
# Fixed: several entries carried trailing spaces (L2MacMoveTestI/II/III,
# L3IPv4_32Test), which would break exact-name matching in the test runner.
COMMUN_TEST_CASE = [
    "saiacl.IPAclTest",
    "saiacl.MACSrcAclTest",
    "saiacl.L3AclTest",
    "saiacl.SeqAclTableGroupTest",
    "saiacl.MultBindAclTableGroupTest",
    "saiacl.BindAclTableInGroupTest",
    "saiacl.L3AclTableTestI",
    "saiacl.L3AclTableGroupTestI",
    "saiacl.L3AclTableGroupTestII",
    "saiacl.L3AclTableTestII",
    "saidebugcounters.DropMCSMAC",
    "saidebugcounters.DropSMACequalsDMAC",
    "saidebugcounters.DropDMACReserved",
    "saidebugcounters.DropIngressVLANFilter",
    "saidebugcounters.DropL2LoopbackFilter",
    "saidebugcounters.DropL3LoopbackFilter",
    "saidebugcounters.DropNonRoutable",
    "saidebugcounters.DropNoL3Header",
    "saidebugcounters.DropIPHeaderError",
    "saidebugcounters.DropUCDIPMCDMAC",
    "saidebugcounters.DropDIPLoopback",
    "saidebugcounters.DropSIPLoopback",
    "saidebugcounters.DropMulticastSIP",
    "saidebugcounters.DropSIPClassE",
    "saidebugcounters.DropSIPUnspecified",
    "saidebugcounters.DropMCDMACMismatch",
    "saidebugcounters.DropSIPEqualsDIP",
    "saidebugcounters.DropSIPBC",
    "saidebugcounters.DropDIPLocal",
    "saidebugcounters.DropDIPLinkLocal",
    "saidebugcounters.DropSIPLinkLocal",
    "saidebugcounters.DropIPv6MCScope0",
    "saidebugcounters.DropIPv6MCScope1",
    "saidebugcounters.DropIRIFDisabled",
    "saidebugcounters.DropERIFDisabled",
    "saidebugcounters.DropLPM4Miss",
    "saidebugcounters.DropLPM6Miss",
    "saidebugcounters.DropBlackholeRoute",
    "saidebugcounters.DropACLAny",
    "saidebugcounters.NoDropIngressVLANFilter",
    "saidebugcounters.DropMultipleReasons",
    "saidebugcounters.EditingDropReasons",
    "saifdb.L2FDBMissUnicastTest",
    "saifdb.L2FDBMissBroadcastTest",
    "saihostif.NoPolicyTest",
    "saihostif.PolicyTest",
    "saihostif.ARPTest",
    "saihostif.DHCPTest",
    "saihostif.LLDPTest",
    "saihostif.LACPTest",
    "saihostif.SNMPTest",
    "saihostif.SSHTest",
    "saihostif.IP2METest",
    "saihostif.TTLErrorTest",
    "saihostif.BGPTest",
    "sail2.L2AccessToAccessVlanTest",
    "sail2.L2TrunkToTrunkVlanTest",
    "sail2.L2AccessToTrunkVlanTest",
    "sail2.L2TrunkToAccessVlanTest",
    "sail2.L2FloodTest",
    "sail2.L2LagTest",
    "sail2.LagHashseedTest",
    "sail2.L2VlanBcastUcastTest",
    "sail2.L2FdbAgingTest",
    "sail2.L2ARPRequestReplyFDBLearningTest",
    "sail2.L2BridgeSubPortFloodTest",
    "sail2.L2BridgePortTestI",
    "sail2.L2BridgeSubPortFDBTest",
    "sail2.L2MtuTest",
    "sail2.L2MacMoveTestI",
    "sail2.L2MacMoveTestII",
    "sail2.L2MacMoveTestIII",
    "sail3.L3IPv4HostTest",
    "sail3.L3IPv4LpmTest",
    "sail3.L3IPv6HostTest",
    "sail3.L3IPv6PrefixTest",
    "sail3.L3IPv6LpmTest",
    "sail3.L3IPv4EcmpHostTest",
    "sail3.L3IPv6EcmpHostTest",
    "sail3.L3IPv4EcmpLpmTest",
    "sail3.L3IPv6EcmpLpmTest",
    "sail3.L3IPv4EcmpHashSeedTest",
    "sail3.L3IPv4LagTest",
    "sail3.L3IPv6LagTest",
    "sail3.L3EcmpLagTest",
    "sail3.L3EcmpLagTestMini",
    "sail3.L3VIIPv4HostTest",
    "sail3.L3IPv4MacRewriteTest",
    "sail3.L3VlanNeighborMacUpdateTest",
    "sail3.L3MultipleLagTest",
    "sail3.L3MultipleEcmpLagTest",
    "sail3.L3BridgeAndSubPortRifTest",
    "sail3.L3SubPortAndVLANRifTest",
    "sail3.L3MtuTest",
    "sail3.L3IPv4NeighborMacTest",
    "sail3.L3IPv6NeighborMacTest",
    "sail3.L3IPv4NeighborFdbAgeoutTest",
    "sail3.L3IPv6NeighborFdbAgeoutTest",
    "sail3.L3IPv4EcmpGroupMemberTest",
    "sail3.L3IPv6EcmpGroupMemberTest",
    "sail3.L3IPv4_32Test",
    "sail3.L3LpbkSubnetTest",
    "saimirror.IngressLocalMirrorTest",
    "saimirror.IngressRSpanMirrorTest",
    "saimirror.IngressERSpanMirrorTest",
    "saimirror.EgressLocalMirrorTest",
    "saimirror.EgressERSpanMirrorTest",
    "saitunnel.IpIpEncapTest",
    "saitunnel.IpIpP2PTunnelDecapTest",
    "saitunnel.IpIpP2PTunnelDecapOnlyTestBase",
    "saitunnel.IpIpP2PTunnelDecapTestIpv4inIpv4",
    "saitunnel.IpIpP2PTunnelDecapTestIpv6inIpv4",
    "saitunnel.IpIpP2PTunnelDecapTestIpv4inIpv6",
    "saitunnel.IpIpP2PTunnelDecapTestIpv6inIpv6",
    "saitunnel.IpIpP2PTunnelDecapTestIpv4inIpv4GRE",
    "saitunnel.IpIpP2PTunnelDecapTestIpv6inIpv4GRE",
    "saitunnel.IpIpP2PTunnelDecapTestIpv4inIpv6GRE",
    "saitunnel.IpIpP2PTunnelDecapTestIpv6inIpv6GRE"
]
# Sanity cases run through the PTF-SAI framework (presumably a smaller smoke
# set than COMMUN_TEST_CASE — confirm against the runner that consumes these).
PTF_SAI_TEST_CASE = [
    "saisanity.L2TrunkToTrunkVlanTest",
    "saisanity.L2TrunkToAccessVlanTest",
    "saisanity.L2SanityTest"
]

# Cases executed around a warm reboot.
WARM_BOOT_TEST_CASE = [
    "warm_saisanity.WarmL2SanityTest"
]

# Single quick case used to probe that the environment can run tests at all.
PROBE_TEST_CASE = "sail3.L3IPv4HostTest"
|
[
"noreply@github.com"
] |
gord1306.noreply@github.com
|
e00073bc15e99ad9f8df5f66533a616d9a50b004
|
4edbeb3e2d3263897810a358d8c95854a468c3ca
|
/python3/re/sub1.py
|
a6845b67a8b3cf82fa3820856df278a7eb085aa4
|
[
"MIT"
] |
permissive
|
jtraver/dev
|
f505d15d45b67a59d11306cc7252114c265f388b
|
2197e3443c7619b856470558b737d85fe1f77a5a
|
refs/heads/master
| 2023-08-06T02:17:58.601861
| 2023-08-01T16:58:44
| 2023-08-01T16:58:44
| 14,509,952
| 0
| 1
|
MIT
| 2020-10-14T18:32:48
| 2013-11-19T00:51:19
|
Python
|
UTF-8
|
Python
| false
| false
| 631
|
py
|
#!/usr/bin/env python3
#!/usr/bin/python
# Demo script: sanitizing a string with re.sub, contrasting '[...]+' (collapse
# runs) with '[...]' (replace each char individually).
import re

str1 = "An arbitrary string. Literal containing chars like: []{}!#$!@#!%ls813"
print(" ")
print(str1)
# taken from
# citrusleaf/monitoring-console/server/site-packages/pkg_resources.py
print(" ")
# '+' collapses each run of non-alphanumerics into a single underscore.
print("re.sub('[^A-Za-z0-9]+', '_', str1)")
print(re.sub('[^A-Za-z0-9]+', '_', str1))
print(" ")
# Without '+', every single non-alphanumeric char becomes its own underscore.
print("re.sub('[^A-Za-z0-9]', '_', str1)")
print(re.sub('[^A-Za-z0-9]', '_', str1))
def replaceIt(str1):
    """Collapse every run of non-alphanumeric characters in str1 into '_'."""
    print(" ")
    print("return re.sub('[^A-Za-z0-9]+', '_', str1)")
    non_word = re.compile('[^A-Za-z0-9]+')
    return non_word.sub('_', str1)
print(" ")
print("replaceIt(str1)")
# Exercise the helper on the same sample string.
print(replaceIt(str1))
|
[
"john@aeropsike.com"
] |
john@aeropsike.com
|
916f487636f1c022f25759cae3478469254fc569
|
fdd050eef1c075965b7717f014ae2eeb51c1483f
|
/gen_dataset/assemble_rnet_imglist.py
|
498b49b60c38c168aa4060135adb76b325bb2eaa
|
[] |
no_license
|
digital-nomad-cheng/MTCNN_PyTorch_Zero_To_One
|
a8b33b4b39c6f325280d04f22f0e72c532b33cd3
|
30c3fd34c29aa81c4353029c55721b54cc961534
|
refs/heads/master
| 2022-11-06T18:30:35.344107
| 2019-10-09T06:30:17
| 2019-10-09T06:30:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
import os
import config
import gen_dataset.assemble as assemble
if __name__ == '__main__':
    # Assemble the RNet training image list from the four annotation files
    # (positive / part / negative / landmark), preserving the original order.
    rnet_postive_file = os.path.join(config.ANNO_STORE_DIR, config.RNET_POSTIVE_ANNO_FILENAME)
    rnet_part_file = os.path.join(config.ANNO_STORE_DIR, config.RNET_PART_ANNO_FILENAME)
    rnet_neg_file = os.path.join(config.ANNO_STORE_DIR, config.RNET_NEGATIVE_ANNO_FILENAME)
    rnet_landmark_file = os.path.join(config.ANNO_STORE_DIR, config.RNET_LANDMARK_ANNO_FILENAME)
    anno_list = [rnet_postive_file, rnet_part_file, rnet_neg_file, rnet_landmark_file]

    imglist_file = os.path.join(config.ANNO_STORE_DIR, config.RNET_TRAIN_IMGLIST_FILENAME)
    chose_count = assemble.assemble_data(imglist_file, anno_list)
    # Fixed: the message said "PNet" but this script assembles the RNet list.
    print("RNet train annotation result file path:%s, total num of imgs: %d" % (imglist_file, chose_count))
|
[
"vincentcheng929@gmail.com"
] |
vincentcheng929@gmail.com
|
cd5cc4556e6d1854330409d0157e50db0125950f
|
3f9f7fe32c655e612f351302ad1945e92e514a31
|
/ut/scrap/xgoogle/sponsoredlinks.py
|
e3b4515c1a2095d5cb8875038664aca09f932b8a
|
[
"MIT"
] |
permissive
|
thorwhalen/ut
|
12ea7e0fd9bc452d71b0cc3d8ecdb527335a3c17
|
72dbdf41b0250708ad525030128cc7c3948b3f41
|
refs/heads/master
| 2023-02-17T06:44:11.053826
| 2023-02-07T13:22:07
| 2023-02-07T13:22:07
| 32,152,452
| 6
| 4
|
MIT
| 2023-02-16T00:34:33
| 2015-03-13T11:32:31
|
Python
|
UTF-8
|
Python
| false
| false
| 7,973
|
py
|
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#
import re
import urllib.request, urllib.parse, urllib.error
import random
from html.entities import name2codepoint
from .BeautifulSoup import BeautifulSoup
from .browser import Browser, BrowserError
#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#
class SLError(Exception):
    """Base error raised by the sponsored-links scraper."""
class SLParseError(Exception):
    """Parse error in Google results; raised only in debug mode.

    Attributes:
        msg: explanation of why parsing failed.
        tag: BeautifulSoup object for the most relevant tag that failed to parse.
    """

    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def __str__(self):
        return self.msg

    def html(self):
        # Pretty-printed markup of the offending tag.
        return self.tag.prettify()
GET_ALL_SLEEP_FUNCTION = object()
class SponsoredLink(object):
    """Immutable-by-convention value object for one sponsored link result."""

    def __init__(self, title, url, display_url, desc):
        self.title = title
        self.url = url
        self.display_url = display_url
        self.desc = desc
class SponsoredLinks(object):
    """Scraper for Google's sponsored-links search results.

    Pages are fetched lazily via get_results(); get_all_results() loops
    until the results are exhausted. In debug mode parse problems raise
    SLParseError instead of being skipped silently.
    """

    SEARCH_URL_0 = 'http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en'
    NEXT_PAGE_0 = (
        'http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en'
    )
    SEARCH_URL_1 = 'http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en'
    NEXT_PAGE_1 = 'http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en'

    def __init__(self, query, random_agent=False, debug=False):
        """query: search terms; random_agent: randomize the browser UA;
        debug: raise parse errors instead of returning partial results."""
        self.query = query
        self.debug = debug
        self.browser = Browser(debug=debug)
        self._page = 0
        self.eor = False  # end-of-results flag
        self.results_info = None
        self._results_per_page = 10
        if random_agent:
            self.browser.set_random_user_agent()

    @property
    def num_results(self):
        """Total result count reported by Google (fetches page 0 on demand)."""
        if not self.results_info:
            page = self._get_results_page()
            self.results_info = self._extract_info(page)
            if self.results_info['total'] == 0:
                self.eor = True
        return self.results_info['total']

    def _get_results_per_page(self):
        return self._results_per_page

    def _set_results_par_page(self, rpp):
        self._results_per_page = rpp

    results_per_page = property(_get_results_per_page, _set_results_par_page)

    def get_results(self):
        """Return the next page as a list of SponsoredLink ([] when done)."""
        if self.eor:
            return []
        page = self._get_results_page()
        info = self._extract_info(page)
        if self.results_info is None:
            self.results_info = info
        if info['to'] == info['total']:
            self.eor = True
        results = self._extract_results(page)
        if not results:
            self.eor = True
            return []
        self._page += 1
        return results

    def _get_all_results_sleep_fn(self):
        return random.random() * 5 + 1  # sleep from 1 - 6 seconds

    def get_all_results(self, sleep_function=None):
        """Fetch every remaining page and return the combined result list.

        NOTE(review): sleep_function is resolved but never actually invoked
        between page fetches — looks like an upstream bug; behavior kept
        unchanged here to avoid surprising delays for existing callers.
        """
        if sleep_function is GET_ALL_SLEEP_FUNCTION:
            sleep_function = self._get_all_results_sleep_fn
        if sleep_function is None:
            sleep_function = lambda: None
        ret_results = []
        while True:
            res = self.get_results()
            if not res:
                return ret_results
            ret_results.extend(res)
        return ret_results

    def _maybe_raise(self, cls, *arg):
        # Parse failures only surface as exceptions in debug mode.
        if self.debug:
            raise cls(*arg)

    def _extract_info(self, soup):
        """Parse the "Results X - Y of about Z" stats line into a dict."""
        empty_info = {'from': 0, 'to': 0, 'total': 0}
        stats_span = soup.find('span', id='stats')
        if not stats_span:
            return empty_info
        txt = ''.join(stats_span.findAll(text=True))
        # NOTE(review): the first replace argument below may originally have
        # been a non-breaking space that was normalized to ' ' somewhere —
        # as written it is a no-op; kept byte-identical to preserve behavior.
        txt = txt.replace(',', '').replace(' ', ' ')
        matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
        if not matches:
            return empty_info
        return {
            'from': int(matches.group(1)),
            'to': int(matches.group(2)),
            'total': int(matches.group(3)),
        }

    def _get_results_page(self):
        # Pick the URL template: first page vs. later pages, default (10)
        # vs. custom results-per-page.
        if self._page == 0:
            if self._results_per_page == 10:
                url = SponsoredLinks.SEARCH_URL_0
            else:
                url = SponsoredLinks.SEARCH_URL_1
        else:
            if self._results_per_page == 10:
                url = SponsoredLinks.NEXT_PAGE_0
            else:
                url = SponsoredLinks.NEXT_PAGE_1
        safe_url = url % {
            'query': urllib.parse.quote_plus(self.query),
            'start': self._page * self._results_per_page,
            'num': self._results_per_page,
        }
        try:
            page = self.browser.get_page(safe_url)
        except BrowserError as e:
            raise SLError('Failed getting %s: %s' % (e.url, e.error))
        return BeautifulSoup(page)

    def _extract_results(self, soup):
        # Each sponsored result lives in a <div class="g">.
        results = soup.findAll('div', {'class': 'g'})
        ret_res = []
        for result in results:
            eres = self._extract_result(result)
            if eres:
                ret_res.append(eres)
        return ret_res

    def _extract_result(self, result):
        """Build one SponsoredLink from a result div; None if a part is missing."""
        title, url = self._extract_title_url(result)
        display_url = self._extract_display_url(
            result
        )  # Warning: removes 'cite' from the result
        desc = self._extract_description(result)
        if not title or not url or not display_url or not desc:
            return None
        return SponsoredLink(title, url, display_url, desc)

    def _extract_title_url(self, result):
        title_a = result.find('a')
        if not title_a:
            self._maybe_raise(
                SLParseError, 'Title tag in sponsored link was not found', result
            )
            return None, None
        title = ''.join(title_a.findAll(text=True))
        title = self._html_unescape(title)
        url = title_a['href']
        # The real destination is embedded as the q= parameter of the href.
        match = re.search(r'q=(http[^&]+)&', url)
        if not match:
            self._maybe_raise(
                SLParseError, 'URL inside a sponsored link was not found', result
            )
            return None, None
        url = urllib.parse.unquote(match.group(1))
        return title, url

    def _extract_display_url(self, result):
        cite = result.find('cite')
        if not cite:
            self._maybe_raise(SLParseError, '<cite> not found inside result', result)
            return None
        return ''.join(cite.findAll(text=True))

    def _extract_description(self, result):
        cite = result.find('cite')
        if not cite:
            return None
        cite.extract()
        desc_div = result.find('div', {'class': 'line23'})
        if not desc_div:
            # Fixed: this raised NameError (undefined `ParseError`) whenever
            # debug mode hit a missing description tag.
            self._maybe_raise(
                SLParseError, 'Description tag not found in sponsored link', result
            )
            return None
        desc_strs = desc_div.findAll(text=True)[0:-1]
        desc = ''.join(desc_strs)
        desc = desc.replace('\n', ' ')
        # NOTE(review): likely a non-breaking-space replace originally; no-op
        # as written, kept byte-identical.
        desc = desc.replace(' ', ' ')
        return self._html_unescape(desc)

    def _html_unescape(self, str):
        """Decode numeric (&#NN;, NN <= 255) and named (&amp;) HTML entities."""
        # (parameter name `str` shadows the builtin; kept for compatibility)

        def entity_replacer(m):
            entity = m.group(1)
            if entity in name2codepoint:
                return chr(name2codepoint[entity])
            else:
                return m.group(0)

        def ascii_replacer(m):
            cp = int(m.group(1))
            if cp <= 255:
                return chr(cp)
            else:
                return m.group(0)

        # Fixed: re.U was previously passed as the positional `count`
        # argument (re.U == 32), which silently capped each pass at 32
        # substitutions and never set the flag. Pass it via flags= instead.
        s = re.sub(r'&#(\d+);', ascii_replacer, str, flags=re.U)
        return re.sub(r'&([^;]+);', entity_replacer, s, flags=re.U)
|
[
"thorwhalen1@gmail.com"
] |
thorwhalen1@gmail.com
|
3a3f5eca94ff903e351eda079b55486e241fbaf2
|
ee4c4c2cc6c663d4233d8145b01ae9eb4fdeb6c0
|
/configs/FDDB/retinanet/cfgs_res50_fddb_v4.py
|
3d62297eb03eef39a8f72de29894929e06afd4ac
|
[
"Apache-2.0"
] |
permissive
|
yangcyz/RotationDetection
|
c86f40f0be1142c30671d4fed91446aa01ee31c1
|
82706f4c4297c39a6824b9b53a55226998fcd2b2
|
refs/heads/main
| 2023-09-01T23:25:31.956004
| 2021-11-23T13:57:31
| 2021-11-23T13:57:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,992
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
from configs._base_.models.retinanet_r50_fpn import *
from configs._base_.datasets.dota_detection import *
from configs._base_.schedules.schedule_1x import *
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))  # derived from GPU_GROUP (3 here)
SAVE_WEIGHTS_INTE = 2000 * 2  # iterations between checkpoints; also the "epoch" unit below
DECAY_EPOCH = [8, 11, 20]
MAX_EPOCH = 12
WARM_EPOCH = 1 / 16.  # warm-up length as a fraction of SAVE_WEIGHTS_INTE iterations
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)  # NOTE: likely a typo of WARM_STEP; name kept for compatibility

# dataset
DATASET_NAME = 'FDDB'
CLASS_NUM = 1  # single detection class (face, per the eval log below)

# model
# backbone
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')

# bbox head
NUM_SUBNET_CONV = 4
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 1.5, 1.5]

# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0 / 5.0
REG_LOSS_MODE = None

# eval
USE_07_METRIC = False

VERSION = 'RetinaNet_FDDB_2x_20211106'

# Recorded evaluation results for this config (kept verbatim).
"""
RetinaNet-H + 90
FLOPs: 830085163; Trainable params: 32159286
2007
cls : face|| Recall: 0.9648760330578512 || Precison: 0.5751231527093597|| AP: 0.9071560203590661
F1:0.9482526582400714 P:0.9697624190064795 R:0.9276859504132231
mAP is : 0.9071560203590661
2012
cls : face|| Recall: 0.9648760330578512 || Precison: 0.574887156339762|| AP: 0.959204678220418
F1:0.9482526582400714 P:0.9697624190064795 R:0.9276859504132231
mAP is : 0.959204678220418
AP50:95=0.5276534556388707
0.959204678220418 0.9301560772935049 0.8749958747257098 0.7844197465233099 0.683315839522552
0.558135300551797 0.3479441339258663 0.12669957890041392 0.011630901808271605 3.2424916862513164e-05
"""
|
[
"yangxue0827@126.com"
] |
yangxue0827@126.com
|
033a02153fd14c2d5475e0363c33629676f59c87
|
97ffb573b2f5f615c14347f9e2e8c12660c799a8
|
/libs/ignite_utils.py
|
26a021b900f7b27e00f4639a54305d87b4641b46
|
[] |
no_license
|
GOSSAN0602/OCR-Ancient-characters
|
d2745ea133b9d4595e860f03afa1d3eed7ee2104
|
b118a9f40127af505f6e324aaabf0fccd2ce9d12
|
refs/heads/master
| 2020-12-22T21:10:30.266685
| 2020-01-31T07:20:36
| 2020-01-31T07:20:36
| 236,933,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,756
|
py
|
import json
from logging import getLogger
import os
from time import perf_counter
import torch
import pandas as pd
from ignite.engine.engine import Engine, Events
from ignite.metrics import Average
from ignite.metrics.metric import Metric
def save_json(filepath, params):
    """Serialize `params` to `filepath` as pretty-printed (indent=4) JSON."""
    serialized = json.dumps(params, indent=4)
    with open(filepath, 'w') as f:
        f.write(serialized)
class DictOutputTransform:
    """Selects output[index][key] from an engine output tuple.

    A negative index skips the tuple indexing and looks up `key` directly
    on the output object.
    """

    def __init__(self, key, index=0):
        self.key = key
        self.index = index

    def __call__(self, x):
        selected = x if self.index < 0 else x[self.index]
        return selected[self.key]
def create_trainer(classifier, optimizer, device):
    """Build an ignite Engine that runs one optimization step per batch.

    `classifier(x, y)` is expected to return (loss, metrics_dict,
    predictions) — see the unpacking in update_fn. Each key listed in
    classifier.metrics_keys is attached to the engine as a running Average.
    """
    classifier.to(device)

    def update_fn(engine, batch):
        classifier.train()
        optimizer.zero_grad()
        # batch = [elem.to(device) for elem in batch]
        # Batch is assumed to be an (inputs, targets) pair of tensors.
        x, y = [elem.to(device) for elem in batch]
        loss, metrics, pred_y = classifier(x, y)
        loss.backward()
        optimizer.step()
        return metrics, pred_y, y
    trainer = Engine(update_fn)

    # Expose each reported metric as a per-epoch running average.
    for key in classifier.metrics_keys:
        Average(output_transform=DictOutputTransform(key)).attach(trainer, key)
    return trainer
def create_evaluator(classifier, device):
    """Build an ignite Engine that runs gradient-free evaluation per batch.

    Mirrors create_trainer but switches the model to eval mode, wraps the
    forward pass in torch.no_grad(), and discards the loss value.
    """
    classifier.to(device)

    def update_fn(engine, batch):
        classifier.eval()
        with torch.no_grad():
            # batch = [elem.to(device) for elem in batch]
            x, y = [elem.to(device) for elem in batch]
            _, metrics, pred_y = classifier(x, y)
            return metrics, pred_y, y
    evaluator = Engine(update_fn)

    # Same running-average metrics as the trainer, attached to the evaluator.
    for key in classifier.metrics_keys:
        Average(output_transform=DictOutputTransform(key)).attach(evaluator, key)
    return evaluator
class LogReport:
    """Collects metrics each time it is fired (typically once per epoch),
    prints a one-line summary, and optionally persists the full history to
    log.json / log.csv under `dirpath`.
    """

    def __init__(self, evaluator=None, dirpath=None, logger=None):
        self.evaluator = evaluator
        self.dirpath = str(dirpath) if dirpath is not None else None
        self.logger = logger or getLogger(__name__)
        self.reported_dict = {}  # To handle additional parameter to monitor
        self.history = []
        self.start_time = perf_counter()

    def report(self, key, value):
        # Queue an extra key/value to be merged into the next record.
        self.reported_dict[key] = value

    def __call__(self, engine):
        elapsed_time = perf_counter() - self.start_time
        elem = {'epoch': engine.state.epoch,
                'iteration': engine.state.iteration}
        # Training metrics come from the engine this handler is attached to.
        elem.update({f'train/{key}': value
                     for key, value in engine.state.metrics.items()})
        # Validation metrics, if an evaluator was supplied.
        if self.evaluator is not None:
            elem.update({f'valid/{key}': value
                         for key, value in self.evaluator.state.metrics.items()})
        elem.update(self.reported_dict)
        elem['elapsed_time'] = elapsed_time
        self.history.append(elem)
        if self.dirpath:
            save_json(os.path.join(self.dirpath, 'log.json'), self.history)
            self.get_dataframe().to_csv(os.path.join(self.dirpath, 'log.csv'), index=False)

        # --- print ---
        msg = ''
        for key, value in elem.items():
            if key in ['iteration']:
                # skip printing some parameters...
                continue
            elif isinstance(value, int):
                msg += f'{key} {value: >6d} '
            else:
                msg += f'{key} {value: 8f} '
        # self.logger.warning(msg)
        print(msg)

        # --- Reset ---
        # Extra reported values only apply to the record they were queued for.
        self.reported_dict = {}

    def get_dataframe(self):
        # Full history as a DataFrame, one row per recorded call.
        df = pd.DataFrame(self.history)
        return df
class SpeedCheckHandler:
    """Logs training speed (iterations/second) every ``iteration_interval`` steps."""

    def __init__(self, iteration_interval=10, logger=None):
        self.iteration_interval = iteration_interval
        self.logger = logger or getLogger(__name__)
        self.prev_time = perf_counter()

    def __call__(self, engine: Engine):
        if engine.state.iteration % self.iteration_interval != 0:
            return
        now = perf_counter()
        iters_per_sec = self.iteration_interval / (now - self.prev_time)
        self.logger.warning(f'{iters_per_sec} iter/sec')
        # reset the reference point for the next window
        self.prev_time = now

    def attach(self, engine: Engine):
        engine.add_event_handler(Events.ITERATION_COMPLETED, self)
class ModelSnapshotHandler:
    """Saves ``model.state_dict()`` every ``interval`` invocations.

    ``filepath`` may contain a ``{count}`` placeholder, filled with the total
    number of times this handler has been called so far.
    """

    def __init__(self, model, filepath='model_{count:06}.pt',
                 interval=1, logger=None):
        self.model = model
        self.filepath: str = str(filepath)
        self.interval = interval
        self.logger = logger or getLogger(__name__)
        self.count = 0

    def __call__(self, engine: Engine):
        self.count += 1
        if self.count % self.interval != 0:
            return
        target_path = self.filepath.format(count=self.count)
        torch.save(self.model.state_dict(), target_path)
|
[
"google-dl-platform@googlegroups.com"
] |
google-dl-platform@googlegroups.com
|
3cec55de5e490b496ba347b8d217cacfc2c13666
|
b82057c77dd4d00ff9bca9a979a1a3075f0528c4
|
/Exicom_gateway/checks/ec500_dg_run_hrs_status
|
f262c1d3c3aca62ea1088f680672de55d4d0d89f
|
[] |
no_license
|
subhash-007/photography-blog
|
7ee0c4f930fee29d76106c45b09e6b76cb19cf56
|
b1ae66794b48bfe3862cb6e727a3a15a6ef79024
|
refs/heads/master
| 2020-03-31T04:33:00.276628
| 2019-07-12T06:00:39
| 2019-07-12T06:00:39
| 151,910,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,669
|
#!/usr/bin/python
"""
dgRunHrs of poller device.
This is part of device application.
Poller script determines the dgRunHrs of device.
poller script takes the snmp value of OID .1.3.6.1.4.1.38016.14.19.4 from snmp agent of device at specific interval.
Device dgRunHrs is sent to device application
"""
# ######################################################################
# Function: check_ec500_dg_run_hrs_status
#
# Parameters: info (SNMP Ouput) params (No Parameters)
#
# Output: device dg_run_hrs
# ######################################################################
ec500_dg_run_hrs_default_levels = ()
def check_ec500_dg_run_hrs_status(item, params, info):
    """
    check_ec500_dg_run_hrs_status function fetches the dgRunHrs
    Args:
        item (str) Specific item on SNMP output on which we want to filter results
    Kwargs:
        params (tuple) Check parameters for critical and warning state of service
    Returns:
        state (int) :
            0 : OK
            1 : Warning
            2: Critical
            3: unknown
        infotext(string):
            plugin output
        Example : OK - ;;;;
    Raises:
        Exception
    """
    # NOTE(review): Python 2 syntax ("except Exception,e") -- this plugin must
    # run under the check_mk Python 2 interpreter; do not port blindly.
    state = 3
    infotext = "unknown_value"
    perf_data = ['']
    if info:
        try:
            state = 0
            try :
                # Prefer a numeric reading; on failure fall back to the raw
                # string with spaces replaced so the output stays one token.
                ec500_dg_run_hrs = float(info[0][0])
            except Exception,e:
                ec500_dg_run_hrs = str(info[0][0].replace(" ","@"))
            perf_data = [("ec500_dg_run_hrs", ec500_dg_run_hrs)]
            return (state, "ec500_dg_run_hrs=%s" % ec500_dg_run_hrs, perf_data)
        except Exception,e:
            # Any unexpected failure -> UNKNOWN (3) with the placeholder value.
            return (3, "ec500_dg_run_hrs=%s" % infotext.replace(" ","@"), perf_data)
    else:
        # No SNMP data retrieved at all -> UNKNOWN.
        return (state, "ec500_dg_run_hrs=%s" %"No data retrieved".replace(" ","@"), perf_data)
# This check works on all SNMP hosts
"""
Dictionary-based declaration of all check types
"""
check_info["ec500_dg_run_hrs_status"] = {
'check_function': check_ec500_dg_run_hrs_status,
'service_description': 'ec500_dg_run_hrs_status',
'has_perfdata': True,
}
#########################################################################
# SNMP OID for the device dgRunHrs
#########################################################################
snmp_info["ec500_dg_run_hrs_status"] = ('.1.3.6.1.4.1.38016.14', ['19.4'])
|
[
"sbmoond@gmail.com"
] |
sbmoond@gmail.com
|
|
64c49ca2cb4c7c43f39de8540150f88edbcf456f
|
09b22d1bd1263e4082e6bba7afa2f2b7a66afd4a
|
/2 Panda/Merging Joining and concatenating.py
|
90dcad0a206666caa8afe348c070f41b30c17891
|
[] |
no_license
|
yogeshkushwahait/Machine-Learning-Using-Python
|
b70bc5334c4178fecc175451b8b7e04e50a60917
|
8102ce7b0cba5d48e923f979ae0a8e71c25857b1
|
refs/heads/master
| 2022-03-28T05:21:24.332537
| 2019-11-05T06:34:00
| 2020-01-09T16:06:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
# coding: utf-8
# Notebook export: demonstrates pandas concatenation and merging on toy frames.
# In[1]:
import numpy as np
# In[2]:
import pandas as pd
# In[8]:
# Three frames with identical columns and consecutive, non-overlapping indices.
df1 = pd.DataFrame({'A':['A0','A1','A2','A3'],
                        'B':['B0','B1','B2','B3'],
                        'C':['C0','C1','C2','C3'],
                        'D':['D0','D1','D2','D3']},
                        index=[0,1,2,3])
# In[9]:
df2 = pd.DataFrame({'A':['A4','A5','A6','A7'],
                        'B':['B4','B5','B6','B7'],
                        'C':['C4','C5','C6','C7'],
                        'D':['D4','D5','D6','D7']},
                        index=[4,5,6,7])
# In[10]:
df3 = pd.DataFrame({'A':['A8','A9','A10','A11'],
                        'B':['B8','B9','B10','B11'],
                        'C':['C8','C9','C10','C11'],
                        'D':['D8','D9','D10','D11']},
                        index=[8,9,10,11])
# In[11]:
df1
# In[12]:
df2
# In[13]:
df3
# In[14]:
# Row-wise concatenation (axis=0): stacks df1..df3 into one 12-row frame.
pd.concat([df1,df2,df3])
# In[15]:
# Column-wise concatenation (axis=1): aligns on index, filling gaps with NaN.
pd.concat([df1,df2,df3],axis=1)
# In[18]:
# Single-key merge example.
left = pd.DataFrame({'key':['K0','K1','K2','K3'],
                     'A':['A0','A1','A2','A3'],
                     'B':['B0','B1','B2','B3']})
# In[19]:
right = pd.DataFrame({'key':['K0','K1','K2','K3'],
                          'C':['C0','C1','C2','C3'],
                          'D':['D0','D1','D2','D3']})
# In[20]:
left
# In[21]:
right
# In[22]:
pd.merge(left,right,how='inner',on='key') #By default inner
# In[27]:
# Composite-key merge: join on (key1, key2) with the four join types.
left = pd.DataFrame({'key1':['K0','K0','K1','K2'],
                     'key2':['K0','K1','K0','K1'],
                        'A':['A0','A1','A2','A3'],
                        'B':['B0','B1','B2','B3']})
right = pd.DataFrame({'key1':['K0','K1','K1','K2'],
                               'key2':['K0','K0','K0','K0'],
                                  'C':['C0','C1','C2','C3'],
                                  'D':['D0','D1','D2','D3']})
# In[28]:
left
# In[29]:
right
# In[30]:
pd.merge(left,right,on=['key1','key2'])
# In[31]:
pd.merge(left,right,how='outer', on=['key1','key2'])
# In[32]:
pd.merge(left,right,how='right', on=['key1','key2'])
# In[34]:
pd.merge(left,right,how='left', on=['key1','key2'])
|
[
"rishav.jnit@gmail.com"
] |
rishav.jnit@gmail.com
|
86967f12db84d645c96b6fcc9ce73c7e7323e057
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/ros/py_ros/ur/follow_q_traj2.py
|
aefc0a0a5ab2d97e9674f530bc544538ff3fa55e
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392
| 2023-08-27T06:45:20
| 2023-08-27T06:45:20
| 181,903,332
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
#!/usr/bin/python
#\file follow_q_traj1.py
#\brief Follow a joint angle trajectory.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Jun.12, 2018
#Based on: ../baxter/follow_q_traj1.py
import roslib
import rospy
import actionlib
import control_msgs.msg
import trajectory_msgs.msg
import time, math, sys, copy
from get_q1 import GetState
# NOTE(review): indentation under the __main__ guard appears to have been lost
# in this copy of the file -- confirm against the original before running.
if __name__=='__main__':
rospy.init_node('ur_test')
joint_names= ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
    'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
client= actionlib.SimpleActionClient('/follow_joint_trajectory', control_msgs.msg.FollowJointTrajectoryAction)
client.cancel_goal() #Ensure to cancel the ongoing goal.
# Wait some seconds for the head action server to start or exit
if not client.wait_for_server(rospy.Duration(5.0)):
    rospy.logerr('Exiting - Joint Trajectory Action Server Not Found')
    rospy.signal_shutdown('Action Server not found')
    sys.exit(1)
goal= control_msgs.msg.FollowJointTrajectoryGoal()
#goal.goal_time_tolerance= rospy.Time(0.1)
goal.trajectory.joint_names= joint_names
#NOTE: We need to specify velocities. Otherwise:
#error_code: -1
#error_string: "Received a goal without velocities"
def add_point(goal, time, positions, velocities):
    # Append one waypoint (positions + velocities at time_from_start) to the goal.
    point= trajectory_msgs.msg.JointTrajectoryPoint()
    point.positions= copy.deepcopy(positions)
    point.velocities= copy.deepcopy(velocities)
    point.time_from_start= rospy.Duration(time)
    goal.trajectory.points.append(point)
angles= GetState().position
# Small +/-0.02 rad excursion around the current pose, returning home at t=4s.
add_point(goal, 0.0, angles, [0.0]*6)
add_point(goal, 1.0, [q+0.02 for q in angles], [0.0]*6)
add_point(goal, 3.0, [q-0.02 for q in angles], [0.0]*6)
add_point(goal, 4.0, angles, [0.0]*6)
goal.trajectory.header.stamp= rospy.Time.now()
client.send_goal(goal)
#client.cancel_goal()
#client.wait_for_result(timeout=rospy.Duration(20.0))
print client.get_result()
#rospy.signal_shutdown('Done.')
|
[
"info@akihikoy.net"
] |
info@akihikoy.net
|
1a1e06233c6c3a7141c912a01c8cfff5ff0c0416
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/url_object.py
|
301b5c0fb38a39a4473ea350b7066e607b8d92f6
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,090
|
py
|
# coding: utf-8
import re
import six
class UrlObject:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []
    openapi_types = {
        'id': 'str',
        'url': 'str',
        'status': 'str',
        'create_time': 'int',
        'task_id': 'str',
        'process_reason': 'str'
    }
    attribute_map = {
        'id': 'id',
        'url': 'url',
        'status': 'status',
        'create_time': 'create_time',
        'task_id': 'task_id',
        'process_reason': 'process_reason'
    }
    def __init__(self, id=None, url=None, status=None, create_time=None, task_id=None, process_reason=None):
        """UrlObject - a model defined in huaweicloud sdk"""
        self._id = None
        self._url = None
        self._status = None
        self._create_time = None
        self._task_id = None
        self._process_reason = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if url is not None:
            self.url = url
        if status is not None:
            self.status = status
        if create_time is not None:
            self.create_time = create_time
        if task_id is not None:
            self.task_id = task_id
        if process_reason is not None:
            self.process_reason = process_reason
    @property
    def id(self):
        """Gets the id of this UrlObject.

        Task ID.

        :return: The id of this UrlObject.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this UrlObject.

        Task ID.

        :param id: The id of this UrlObject.
        :type: str
        """
        self._id = id
    @property
    def url(self):
        """Gets the url of this UrlObject.

        The URL address.

        :return: The url of this UrlObject.
        :rtype: str
        """
        return self._url
    @url.setter
    def url(self, url):
        """Sets the url of this UrlObject.

        The URL address.

        :param url: The url of this UrlObject.
        :type: str
        """
        self._url = url
    @property
    def status(self):
        """Gets the status of this UrlObject.

        Status of the URL: processing, succeed or failed.

        :return: The status of this UrlObject.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this UrlObject.

        Status of the URL: processing, succeed or failed.

        :param status: The status of this UrlObject.
        :type: str
        """
        self._status = status
    @property
    def create_time(self):
        """Gets the create_time of this UrlObject.

        Creation time of the URL, in milliseconds since the UTC epoch (1970-01-01).

        :return: The create_time of this UrlObject.
        :rtype: int
        """
        return self._create_time
    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this UrlObject.

        Creation time of the URL, in milliseconds since the UTC epoch (1970-01-01).

        :param create_time: The create_time of this UrlObject.
        :type: int
        """
        self._create_time = create_time
    @property
    def task_id(self):
        """Gets the task_id of this UrlObject.

        ID of the task this URL belongs to.

        :return: The task_id of this UrlObject.
        :rtype: str
        """
        return self._task_id
    @task_id.setter
    def task_id(self, task_id):
        """Sets the task_id of this UrlObject.

        ID of the task this URL belongs to.

        :param task_id: The task_id of this UrlObject.
        :type: str
        """
        self._task_id = task_id
    @property
    def process_reason(self):
        """Gets the process_reason of this UrlObject.

        Reason recorded for the processing result.

        :return: The process_reason of this UrlObject.
        :rtype: str
        """
        return self._process_reason
    @process_reason.setter
    def process_reason(self, process_reason):
        """Sets the process_reason of this UrlObject.

        Reason recorded for the processing result.

        :param process_reason: The process_reason of this UrlObject.
        :type: str
        """
        self._process_reason = process_reason
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        # simplejson is the serializer used throughout this generated SDK.
        import simplejson as json
        return json.dumps(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UrlObject):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
f91cda1a698a8414cfa6864362ce4e7d7c5b3a8e
|
6a1eba9825c67782102972aee1759f0e59c9eef7
|
/naeval/morph/models/__init__.py
|
9b598a2c2e6add867c5e7b31ef389418ccf23090
|
[] |
no_license
|
buriy/naeval
|
6d238592ba6c02a625ccf7b643af84350b913de8
|
455cfb07047140aff2e4700a1630db7682c4d06a
|
refs/heads/master
| 2022-08-08T00:19:06.185029
| 2020-05-05T06:52:05
| 2020-05-05T06:52:05
| 264,369,308
| 0
| 0
| null | 2020-05-16T05:52:04
| 2020-05-16T05:52:03
| null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
from .udpipe import UDPipeModel # noqa
from .spacy import SpacyModel # noqa
from .maru import MaruModel # noqa
from .rnnmorph import RNNMorphModel # noqa
from .deeppavlov import DeeppavlovModel, DeeppavlovBERTModel # noqa
from .rupostagger import RuPosTaggerModel # noqa
from .slovnet import SlovnetModel, SlovnetBERTModel # noqa
|
[
"alex@alexkuk.ru"
] |
alex@alexkuk.ru
|
e3c18ae427bff89a74c4a155958e3776f118fc9d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_142/828.py
|
0dd233e9b038c694d7b9e783862aca4c210c8aa3
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,573
|
py
|
import sys,math
cases = int(raw_input())
plays = []
def skeleton(string):
    """Collapse consecutive duplicate characters: 'aabba' -> 'aba'.

    Requires a non-empty string (indexes string[0], like the original).
    """
    compressed = [string[0]]
    for ch in string[1:]:
        if ch != compressed[-1]:
            compressed.append(ch)
    return ''.join(compressed)
def is_possible(strings):
    """True when every string collapses to the same skeleton as the first one."""
    reference = skeleton(strings[0])
    return all(skeleton(s) == reference for s in strings[1:])
def mean_length(strings):
    """Index of the string whose length is nearest the (rounded) mean length.

    Ties keep the earliest index, matching the original strict-< scan.
    """
    #print strings
    total = sum(len(s) for s in strings)
    mean = int(round(float(total) / len(strings)))
    return min(range(len(strings)),
               key=lambda idx: (len(strings[idx]) - mean) ** 2)
def numberLetter(string, letter, group):
    """Count occurrences of *letter* inside its *group*-th consecutive run.

    Runs of *letter* are numbered 0, 1, ... from the left; characters outside
    that run (or other letters) are ignored.
    """
    count = 0
    run_index = -1
    previous = ''
    for ch in string:
        if ch == letter:
            if previous != ch:
                run_index += 1
            if run_index == group:
                count += 1
        previous = ch
    return count
def vass(num):
    """Absolute value of *num*.

    The original hand-rolled the comparison; the builtin ``abs`` is identical
    for ints and floats (including 0 and negative zero) and clearer.
    """
    return abs(num)
def moves(skeleton, string, target):
    """Total insert/delete moves to turn *string* into *target*.

    For each position i of the skeleton, compares the length of the
    corresponding letter run in *string* vs *target* and sums the absolute
    differences. The run ordinal is how many times the same letter appeared
    earlier in the skeleton.
    """
    total = 0
    for i, letter in enumerate(skeleton):
        run = skeleton[:i].count(letter)
        total += vass(numberLetter(string, letter, run)
                      - numberLetter(target, letter, run))
    return total
def target(strings, skel):
    """Build the consensus string for a shared skeleton.

    Each skeleton run is expanded to the rounded mean of that run's length
    across all input strings.
    """
    pieces = []
    for i, letter in enumerate(skel):
        run = skel[:i].count(letter)
        total = sum(numberLetter(s, letter, run) for s in strings)
        #print "letter " + letter + " : " + str(num)
        pieces.append(letter * int(round(float(total) / len(strings))))
    return ''.join(pieces)
# Read all test cases up front, then solve each (Python 2: raw_input / print).
for i in range(cases):
    n_strings = int(raw_input())
    #print answer
    strings = []
    for j in range(n_strings):
        strings.append(raw_input())
    plays.append(strings)
case = 0
for strings in plays:
    case += 1
    sys.stdout.write("Case #"+str(case)+": ")
    #print rows
    if is_possible(strings):
        # All strings share a skeleton: build the consensus target and sum
        # the per-string move counts toward it.
        skel = skeleton(strings[0])
        tg = target(strings,skel)
        #print strings
        #print tg
        tot_moves = 0
        for string in strings:
            tot_moves += moves(skel,string,tg)
        print tot_moves
    else:
        # Different skeletons can never be equalized by insert/delete of runs.
        print "Fegla Won";
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ac6c1b5d5cf591fdb17877696c50099d56778da8
|
ae7b262ecd72f2fac76c7fe2cff3b8efd7224cb9
|
/ContainsDuplicateII.py
|
cfe3bcb7c5be59b9be1922b3e736ae55d010407c
|
[] |
no_license
|
FengFengHan/LeetCode
|
02a8041f7413b14bed5ac17af1ba68237b159959
|
c2d449f2a93815f31c432805a6b4b8008d09d3df
|
refs/heads/master
| 2021-01-10T18:44:06.374304
| 2016-04-16T13:18:55
| 2016-04-16T13:18:55
| 56,384,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool

        True iff nums contains two equal values at indices i, j with
        abs(i - j) <= k.

        Single O(n) pass keeping only the *last* index seen per value --
        the minimal gap for a value is always between consecutive
        occurrences, so this matches the original implementation (which
        stored every index per value and re-scanned) with less memory.
        """
        last_seen = {}  # value -> most recent index
        for i, value in enumerate(nums):
            if value in last_seen and i - last_seen[value] <= k:
                return True
            last_seen[value] = i
        return False
|
[
"HAN@HandeMacBook-Pro.local"
] |
HAN@HandeMacBook-Pro.local
|
542da5ea76d78e1c3c42f517cd2c7ba1233314d2
|
43530c02696704af51742144638df037b151a259
|
/apps/friend/migrations/0001_initial.py
|
f45900443d3e401f9a587a9677a99737475e2e2d
|
[] |
no_license
|
LAdkins81/friends
|
58160f4eb5096d96f5a59edc45de38ba5cd388f7
|
8f189965fede9fb13fb94ecbd6d0f9912a0162cf
|
refs/heads/master
| 2021-01-17T08:00:08.866341
| 2017-03-03T18:05:48
| 2017-03-03T18:05:48
| 83,826,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 15:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the friend app: creates the Friend model with
    # timestamps and a FK to the login_reg User (related_name='userfriend').
    initial = True
    dependencies = [
        ('login_reg', '0002_auto_20170303_0957'),
    ]
    operations = [
        migrations.CreateModel(
            name='Friend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('friend', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userfriend', to='login_reg.User')),
            ],
        ),
    ]
|
[
"lahell@gmail.com"
] |
lahell@gmail.com
|
84fa63fb19d08df62b8211a7894d160f837f3aae
|
2e0396c23d592338bec48daf73d5fd1e423b4f41
|
/use_max_rssi_localization.py
|
44ac3535ce9d92bfdafdf4445680de04dfbdea3f
|
[] |
no_license
|
wystephen/localization-wifi
|
4504e8fd819847e9b18641641769bf93c081c4f9
|
5dca0d0df6bced8a519f02711692c6ddfaa57e12
|
refs/heads/master
| 2016-09-11T13:01:17.162633
| 2015-12-29T01:32:29
| 2015-12-29T01:32:29
| 31,436,755
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,559
|
py
|
__author__ = 'Administrator'
# -*- coding:utf-8 -*-
import numpy
import data_transfor
import data_preprocessing
import matplotlib.pyplot as plt
def pose_of_max_rssi(pose, wifi, max_rssi):
    '''
    For each AP, find the index of the pose sample whose RSSI equals that
    AP's maximum. The scan keeps overwriting, so the *last* matching sample
    wins -- NOTE(review): the original (Chinese) docstring said "first";
    confirm which is intended.
    :param pose: array of poses, one row per sample
    :param wifi: array of RSSI readings, one row per sample, one column per AP
    :param max_rssi: per-AP maximum RSSI values
    :return: (per-AP sample indices, per-AP poses at those indices)
    '''
    max_rssi_index = numpy.zeros(len(max_rssi))
    for i in range(len(wifi[:, 1])):
        for j in range(len(max_rssi)):
            if max_rssi[j] == wifi[i, j]:
                max_rssi_index[j] = i
    print 'max_rssi_index_len:', len(max_rssi_index)
    pose_array = numpy.zeros([len(max_rssi_index), 2])
    for i in range(len(max_rssi_index)):
        # NOTE(review): max_rssi_index holds floats; float indexing is
        # rejected by modern NumPy -- this only works on old versions.
        pose_array[i, :] = pose[max_rssi_index[i], :]
    return max_rssi_index, pose_array
def simple_location(pose, wifi, pose_array):
    '''
    Estimate each sample's position from the APs with the strongest signals.
    :param pose: ground-truth poses; only its row count is used here
    :param wifi: RSSI feature array, one row per sample, one column per AP
    :param pose_array: one anchor point per AP (pose recorded nearest that AP)
    :return: estimated coordinates, one row per sample
    '''
    out_pose = numpy.zeros([len(pose[:, 1]), 2])
    max_rssi_tmp = numpy.zeros(2)
    for i in range(len(pose[:, 1])):
        #find max 4 index in the wifi
        # max_rssi is a fixed-size top-4 table sorted by RSSI descending:
        # column 0 = AP index, column 1 = RSSI value.
        max_rssi = numpy.zeros([4, 2])
        for j in range(len(wifi[i, :])):
            if wifi[i, j] > max_rssi[3, 1]:
                # Replace the weakest entry, then bubble it up into place.
                max_rssi[3,0] = j
                max_rssi[3,1] = wifi[i,j]
                for k in range(0,2):
                    k = 2-k
                    if max_rssi[k+1,1] > max_rssi[k,1]:
                        max_rssi_tmp[:] = max_rssi[k,:]
                        max_rssi[k,:] = max_rssi[k+1,:]
                        max_rssi[k+1,:] = max_rssi_tmp[:]
        # Estimate = unweighted mean of the four strongest APs' anchor points.
        # NOTE(review): max_rssi[n,0] is a float index -- rejected by modern
        # NumPy; this code targets an old NumPy version.
        out_pose[i,0] = pose_array[max_rssi[0,0],0]/4.0 +\
                        pose_array[max_rssi[1,0],0]/4.0+\
                        pose_array[max_rssi[2,0],0]/4.0+\
                        pose_array[max_rssi[3,0],0]/4.0
        out_pose[i,1] = pose_array[max_rssi[0,0],1]/4.0 +\
                        pose_array[max_rssi[1,0],1]/4.0+\
                        pose_array[max_rssi[2,0],1]/4.0+\
                        pose_array[max_rssi[3,0],1]/4.0
        # Tried using only the single strongest AP -- error was worse.
        #out_pose[i,0] = pose_array[max_rssi[0,0],0]
        #out_pose[i,1] = pose_array[max_rssi[0,0],1]
    return out_pose
if __name__ == '__main__':
    # Four recording sessions: each yields ground-truth poses and WiFi scans.
    pose, wifi = data_preprocessing.read_end_data('20153221527end_wifi.txt', '20153221527end_pose.txt')
    pose2, wifi2 = data_preprocessing.read_end_data('20153141218end_wifi.txt', '20153141218end_pose.txt')
    pose3, wifi3 = data_preprocessing.read_end_data('20153141231end_wifi.txt', '20153141231end_pose.txt')
    pose4, wifi4 = data_preprocessing.read_end_data('20153221517end_wifi.txt', '20153221517end_pose.txt')
    # Per-session max RSSI per AP, then the pose where each maximum occurred.
    max_rssi = data_preprocessing.find_ap_pose(pose, wifi)
    max_rssi2 = data_preprocessing.find_ap_pose(pose2, wifi2)
    max_rssi3 = data_preprocessing.find_ap_pose(pose3, wifi3)
    max_rssi4 = data_preprocessing.find_ap_pose(pose4, wifi4)
    max_rssi_index, pose_array = pose_of_max_rssi(pose, wifi, max_rssi)
    max_rssi_index2, pose_array2 = pose_of_max_rssi(pose2, wifi2, max_rssi2)
    max_rssi_index3, pose_array3 = pose_of_max_rssi(pose3, wifi3, max_rssi3)
    max_rssi_index4, pose_array4 = pose_of_max_rssi(pose4, wifi4, max_rssi4)
    # print pose_array
    plt.figure(1)
    #plt.axis([-50, 200, -50, 200])
    #plt.plot(pose_array[:,0],pose_array[:, 1], 'o')
    plt.plot(pose_array2[:, 0], pose_array2[:, 1], 'o')
    plt.plot(pose_array3[:, 0], pose_array3[:, 1], 'o')
    #plt.plot(pose_array4[:,0],pose_array4[:,1], 'o')
    plt.grid(1)
    plt.figure(2)
    # Average the four sessions' AP anchor points into one reference set.
    source_pose_array = pose_array/4.0+pose_array2/4.0+pose_array3/4.0+pose_array4/4.0
    #source_pose_array = pose_array3
    # Localize each session against the shared anchors and plot the error.
    out_pose1 = simple_location(pose,wifi,source_pose_array)
    err1 = data_preprocessing.pose_dis(out_pose1,pose)
    plt.plot(err1,'r')
    out_pose2 = simple_location(pose2,wifi2,source_pose_array)
    err2 = data_preprocessing.pose_dis(out_pose2,pose2)
    plt.plot(err2,'b')
    out_pose3 = simple_location(pose3,wifi3,source_pose_array)
    err3 = data_preprocessing.pose_dis(out_pose3,pose3)
    plt.plot(err3,'y')
    out_pose4 = simple_location(pose4,wifi4,source_pose_array)
    err4 = data_preprocessing.pose_dis(out_pose4,pose4)
    plt.plot(err4,'g')
    plt.grid(2)
    plt.figure(3)
    # Accuracy = fraction of session-1 samples with error below 5
    # (units per pose_dis; presumably meters -- TODO confirm).
    ok_times = 0
    for i in range(len(err1)):
        if err1[i] < 5:
            ok_times+=1
    print 'acc:', ok_times*1.0/len(err1)
    plt.show()
|
[
"551619855@qq.com"
] |
551619855@qq.com
|
7e1d7f9452e893ef89f39038ead722d31fe328a4
|
3481356e47dcc23d06e54388153fe6ba795014fa
|
/swig_test/pybuffer/pybuffer.py
|
b1375b20698a70cb441ca9be087f2906c4867c61
|
[] |
no_license
|
Chise1/pyhk
|
c09a4c5a06ce93e7fe50c0cc078429f7f63fcb2f
|
44bdb51e1772efad9d0116feab1c991c601aa68a
|
refs/heads/master
| 2021-01-03T08:24:47.255171
| 2020-02-29T04:05:30
| 2020-02-29T04:05:30
| 239,998,705
| 1
| 0
| null | 2020-02-28T07:35:46
| 2020-02-12T11:40:39
|
C
|
UTF-8
|
Python
| false
| false
| 3,056
|
py
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_pybuffer')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_pybuffer')
_pybuffer = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_pybuffer', [dirname(__file__)])
except ImportError:
import _pybuffer
return _pybuffer
try:
_mod = imp.load_module('_pybuffer', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_pybuffer = swig_import_helper()
del swig_import_helper
else:
import _pybuffer
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG-generated setter helper.

    Routes writes through the class's __swig_setmethods__ table; with
    static=1, unknown attribute names raise instead of being added.
    """
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Dynamic variant: unknown attributes are added to the instance (static=0).
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """SWIG-generated getter helper; consults the class's __swig_getmethods__."""
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """SWIG-generated repr helper; shows the proxied C object when available."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
# This file is compatible with both classic and new-style classes.
|
[
"chise123@live.com"
] |
chise123@live.com
|
cb58f01bb96d29b45a0501db61d84089565b32e1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02853/s487102081.py
|
d4f290c3634e67a7bf9d48e374c08fa37acc1935
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
x,y = map(int,input().split())
def point(a):
    """Prize money for finishing place *a*: 1st/2nd/3rd pay out, others get 0.

    Table lookup replaces the original if/elif chain; behavior is identical
    for every input.
    """
    prizes = {1: 300000, 2: 200000, 3: 100000}
    return prizes.get(a, 0)
c = point(x)
b = point(y)
if x == 1 and y == 1:
print(1000000)
else:
print(c+b)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ff177921548f852db1a384ec33200275af66728e
|
94c1805df5a09c39159d502f420d19ad54b567fc
|
/runtime/deps/gyp/test/configurations/invalid/gyptest-configurations.py
|
bd844b95dd8a330a237123acbaf741c1c816187d
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tmikov/jscomp
|
9805a5a4d06520549c57380f0df4a1c0aa0dab56
|
83828441cb38ec96603a6a60be06977d4852940a
|
refs/heads/develop
| 2021-01-19T02:56:35.102659
| 2016-04-12T06:19:30
| 2016-04-12T06:19:30
| 36,981,674
| 237
| 13
|
Apache-2.0
| 2018-10-14T09:48:12
| 2015-06-06T13:49:26
|
C
|
UTF-8
|
Python
| false
| false
| 923
|
py
|
#!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]
test = TestGyp.TestGyp()
# Each invalid key has its own <key>.gyp fixture; gyp must fail (status=1)
# and its stderr must name the offending key and configuration.
for test_key in invalid_configuration_keys:
  test.run_gyp('%s.gyp' % test_key, status=1, stderr=None)
  expect = ['%s not allowed in the Debug configuration, found in target '
            '%s.gyp:configurations#target' % (test_key, test_key)]
  test.must_contain_all_lines(test.stderr(), expect)
test.pass_test()
|
[
"tmikov@gmail.com"
] |
tmikov@gmail.com
|
867fbd9385ee1f515de8c6bdcfc4433562c0711f
|
5e324af46c554b88b97ee26886b05c88457ff0f5
|
/clients/api/client_list.py
|
c29315aecdbe43ce46d49900a6fc6013ae660bf1
|
[] |
no_license
|
doubleclickdetroit/dindintonight
|
1bda8851e49782d4dc16ca77d46e4b1f431c2b52
|
9769e1a96730b02511d25af8828b075dff5c35b5
|
refs/heads/master
| 2016-08-04T22:01:08.083566
| 2014-07-26T18:58:58
| 2014-07-26T18:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
from clients.models import Client, ClientUser
from clients.serializers import ClientSerializer
from core.api import RESTView
class ClientList(RESTView):
    """
    Client List API Class

    Example URLs:
        /api/v1/clients/
    """

    URL_NAME = 'api-v1-client-list'

    def _handle_get(self, request, *args, **kwargs):
        """Return all clients, optionally filtered to those linked to ?user=<pk>."""
        queryset = Client.objects.all()

        user_pk = request.GET.get('user', None)
        if user_pk is not None:
            queryset = queryset.filter(users__pk=user_pk)

        return self.list_results(request, queryset, ClientSerializer, use_cache=True,
                                 cache_time=self.CACHE_30_DAYS, cache_version=1)

    def _handle_post(self, request, *args, **kwargs):
        """Create a client from the posted data and link the requester to it.

        Sample post data:
            {"name": "Test by Rob"}
        """
        serializer = ClientSerializer(data=request.DATA)
        if not serializer.is_valid():
            return self.raise_bad_request(serializer.errors)

        serializer.save()
        # Link the requesting user to the client they just created.
        ClientUser.objects.create(client=serializer.object, user=request.user)
        return serializer.data
|
[
"rgarrison3@gmail.com"
] |
rgarrison3@gmail.com
|
450b6295dff2b84499d8e5a4ad95db6e63d3b811
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/builds/migrations/0044_alter_version_documentation_type.py
|
86ad50cfe4c8429849bfd08a5e13b50d29d23d61
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 958
|
py
|
# Generated by Django 3.2.13 on 2022-05-30 10:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("builds", "0043_add_cancelled_state"),
]
operations = [
migrations.AlterField(
model_name="version",
name="documentation_type",
field=models.CharField(
choices=[
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
("mkdocs_html", "Mkdocs Html Pages"),
("generic", "Generic"),
],
default="sphinx",
help_text="Type of documentation the version was built with.",
max_length=20,
verbose_name="Documentation type",
),
),
]
|
[
"humitos@gmail.com"
] |
humitos@gmail.com
|
63b02058dd9984a335a7f72ff4650f3fda2d6879
|
b9bc60cca34c6b4f8a750af6062f357f18dfcae2
|
/tensorflow/contrib/copy_graph/python/util/copy_test.py
|
68865fab497d3b72ff411643e196c193ac79df2e
|
[
"Apache-2.0"
] |
permissive
|
lidenghui1110/tensorflow-0.12.0-fpga
|
7c96753aafab5fe79d5d0c500a0bae1251a3d21b
|
f536d3d0b91f7f07f8e4a3978d362cd21bad832c
|
refs/heads/master
| 2022-11-20T11:42:11.461490
| 2017-07-28T09:28:37
| 2017-07-28T09:28:37
| 98,633,565
| 3
| 2
|
Apache-2.0
| 2022-11-15T05:22:07
| 2017-07-28T09:29:01
|
C++
|
UTF-8
|
Python
| false
| false
| 3,323
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.copy_graph.python.util.copy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.framework import tensor_util
graph1 = tf.Graph()
graph2 = tf.Graph()
class CopyVariablesTest(tf.test.TestCase):
def testVariableCopy(self):
with graph1.as_default():
#Define a Variable in graph1
some_var = tf.Variable(2)
#Initialize session
sess1 = tf.Session()
#Initialize the Variable
tf.global_variables_initializer().run(session=sess1)
#Make a copy of some_var in the defsult scope in graph2
copy1 = tf.contrib.copy_graph.copy_variable_to_graph(
some_var, graph2)
#Make another copy with different scope
copy2 = tf.contrib.copy_graph.copy_variable_to_graph(
some_var, graph2, "test_scope")
#Initialize both the copies
with graph2.as_default():
#Initialize Session
sess2 = tf.Session()
#Initialize the Variables
tf.global_variables_initializer().run(session=sess2)
#Ensure values in all three variables are the same
v1 = some_var.eval(session=sess1)
v2 = copy1.eval(session=sess2)
v3 = copy2.eval(session=sess2)
assert isinstance(copy1, tf.Variable)
assert isinstance(copy2, tf.Variable)
assert v1 == v2 == v3 == 2
class CopyOpsTest(tf.test.TestCase):
def testOpsCopy(self):
with graph1.as_default():
#Initialize a basic expression y = ax + b
x = tf.placeholder("float")
a = tf.Variable(3.0)
b = tf.constant(4.0)
ax = tf.mul(x, a)
y = tf.add(ax, b)
#Initialize session
sess1 = tf.Session()
#Initialize the Variable
tf.global_variables_initializer().run(session=sess1)
#First, initialize a as a Variable in graph2
a1 = tf.contrib.copy_graph.copy_variable_to_graph(
a, graph2)
#Initialize a1 in graph2
with graph2.as_default():
#Initialize session
sess2 = tf.Session()
#Initialize the Variable
tf.global_variables_initializer().run(session=sess2)
#Initialize a copy of y in graph2
y1 = tf.contrib.copy_graph.copy_op_to_graph(
y, graph2, [a1])
#Now that y has been copied, x must be copied too.
#Get that instance
x1 = tf.contrib.copy_graph.get_copied_op(x, graph2)
#Compare values of y & y1 for a sample input
#and check if they match
v1 = y.eval({x: 5}, session=sess1)
v2 = y1.eval({x1: 5}, session=sess2)
assert v1 == v2
if __name__ == "__main__":
tf.test.main()
|
[
"lidenghui@hadoop67.localdomain"
] |
lidenghui@hadoop67.localdomain
|
9392be60bb332ad98a912eadab328e3f523a5a0c
|
200ec10b652f9c504728890f6ed7d20d07fbacae
|
/views.py
|
d236861e828287d0c761fe880d0bf9fc996219ee
|
[] |
no_license
|
Ks-Ksenia/flask_shop
|
f4edc17669c29ae02a89e836c3c48230147ae84f
|
9eb44fd22bf99913c9824ea35e3922cb14ef2451
|
refs/heads/master
| 2023-03-01T13:55:20.749127
| 2021-02-14T09:29:04
| 2021-02-14T09:29:04
| 338,767,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
from app import app, db
from flask import render_template, redirect, request, flash, url_for
from flask_login import login_required, login_user, logout_user, current_user
from models import User, Product
from forms import LoginForm, RegistrationForm
@app.route('/')
def index():
return redirect(url_for('menu.catalog'))
@app.route('/search/')
def search():
q = request.args.get('q')
page = request.args.get('page')
if page and page.isdigit():
page = int(page)
else:
page = 1
pages, products = [], []
if q:
products = Product.query.filter(Product.product_name.contains(q)|Product.product_name.contains(q))
pages = products.paginate(page=page, per_page=1)
return render_template('search.html', products=products, pages=pages, q=q)
@app.login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
@app.route('/login', methods=['POST', 'GET'])
def login():
if not current_user.is_authenticated:
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter(User.email == form.email.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember.data)
return redirect(request.args.get('next') or url_for('index'))
else:
flash('Неверный email или пароль')
return render_template('login.html', form=form, title="Вход")
return redirect(url_for('index'))
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/register', methods=['POST', 'GET'])
def registration():
form = RegistrationForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if not user:
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password1.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('login'))
else:
flash('Пользователь с таким email уже существует')
return render_template('singup.html', form=form, title='Регистрация')
|
[
"demag74@mail.ru"
] |
demag74@mail.ru
|
5f8498060a4d282003fadb26e8a0b61c79616b80
|
deb31ab5397c8a669e30bea0f428afaf8a2ebd30
|
/web/migrations/0024_auto_20190412_0008.py
|
fe783346d9264b6a461c6b470fdf443e9bf853dc
|
[] |
no_license
|
zgd0228/product_base
|
1b8bcc43f0a96e5bac09e77f363ed97b582b48cc
|
83948b0b929c852c52503bca3c66b55f3f352f1c
|
refs/heads/master
| 2020-05-15T03:59:17.727156
| 2019-04-18T13:07:05
| 2019-04-18T13:07:05
| 182,077,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2019-04-11 16:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0023_auto_20190411_2317'),
]
operations = [
migrations.CreateModel(
name='ScoreRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(verbose_name='理由')),
('score', models.IntegerField(help_text='违纪扣分写负值,表现邮寄加分写正值', verbose_name='分值')),
],
),
migrations.AddField(
model_name='student',
name='score',
field=models.IntegerField(default=100, verbose_name='积分'),
),
migrations.AddField(
model_name='scorerecord',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.Student', verbose_name='学生'),
),
migrations.AddField(
model_name='scorerecord',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.UserInfo', verbose_name='执行人'),
),
]
|
[
"zgd0228@outlook.com"
] |
zgd0228@outlook.com
|
10a026637a4917ff43db50d14672d19b4e50d5ef
|
cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1
|
/xlsxwriter/test/comparison/test_merge_cells01.py
|
a5c44bfd4b9d19ec1b384c2313ced7464f9620a3
|
[
"BSD-2-Clause"
] |
permissive
|
glasah/XlsxWriter
|
bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec
|
1e8aaeb03000dc2f294ccb89b33806ac40dabc13
|
refs/heads/main
| 2023-09-05T03:03:53.857387
| 2021-11-01T07:35:46
| 2021-11-01T07:35:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('merge_cells01.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format = workbook.add_format({'align': 'center'})
worksheet.set_selection('A4')
worksheet.merge_range('A1:A2', 'col1', format)
worksheet.merge_range('B1:B2', 'col2', format)
worksheet.merge_range('C1:C2', 'col3', format)
worksheet.merge_range('D1:D2', 'col4', format)
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
77564f1e0f6f6a59e0bc5ce08ac4e1446e6b1360
|
8ba62e9ceb9307f2fe81db0cbfaed79fee12f51a
|
/Baekjoon/Dynamic Programming - New/타일 채우기.py
|
29441bfd83cbaa189fa5b02b88ba0feaa001b0e0
|
[] |
no_license
|
Seoyoung2/Algorithm_Study
|
9478e2ef183eed60c9670a05688cd835a2f69acd
|
ea2073e788f3c67a03b1168bbeaa9609e5e6e1bf
|
refs/heads/master
| 2023-02-13T05:04:46.045416
| 2021-01-14T08:01:37
| 2021-01-14T08:01:37
| 199,292,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
# 3×N 크기의 벽을 2×1, 1×2 크기의 타일로 채우는 경우의 수를 구해보자.
# 입력 : 첫째 줄에 n이 주어진다. (1 ≤ n ≤ 30)
# 출력 : 첫째 줄에 경우의 수를 출력한다.
# dp[n] = 3 * dp[n-2] + 2 * (dp[n-4] + dp[n-6] + ... + dp[0])
# n이 홀수면 타일로 채우기 불가능
import sys
n = int(sys.stdin.readline())
dp = [0 for _ in range(31)]
dp[0], dp[1], dp[2] = 1, 0, 3
for i in range(4, n+1, 2):
dp[i] = 3 * dp[i-2]
for j in range(4, i+1, 2):
dp[i] += 2 * dp[i-j]
print(dp[n])
|
[
"ww0111@naver.com"
] |
ww0111@naver.com
|
d64a3757a6473ecc106814852095fdc3456b4424
|
d18ed72d6f8d27dd8a13eab5c6366f9dca48aa6b
|
/espresso/vinil/content/actors/test/index.odb
|
20b348324b0ce513e258e924725f89b2f08481d1
|
[
"Apache-2.0"
] |
permissive
|
danse-inelastic/AbInitio
|
6f1dcdd26a8163fa3026883fb3c40f63d1105b0c
|
401e8d5fa16b9d5ce42852b002bc2e4274afab84
|
refs/heads/master
| 2021-01-10T19:16:35.770411
| 2011-04-12T11:04:52
| 2011-04-12T11:04:52
| 34,972,670
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,696
|
odb
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Alex Dementsov
# California Institute of Technology
# (C) 2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from luban.content import select
from luban.content.Paragraph import Paragraph
from luban.content.Document import Document
from luban.content import load
from luban.content.Link import Link
from vinil.components.Actor import Actor as base
class Actor(base):
class Inventory(base.Inventory):
import pyre.inventory
id = pyre.inventory.str('id')
def content(self, director):
document = Document()
# Implement tests for jmd:
# 1. Echo
# 2. Simple calculator
document.add(Link(label="Test jmd",
onclick=select(id='test-jmd').replaceContent(Paragraph(text="World", id="test-jmd")) )
)
document.add(Paragraph(text=director.blah, id="test-jmd")) # "Hello"director.blah
return document
def pathlist(self, director):
self.pathlist = (["Home", None, None],)
return self.pathlist
def __init__(self, *args, **kwds):
super(Actor, self).__init__(*args, **kwds)
return
def _configure(self):
super(Actor, self)._configure()
self.id = self.inventory.id
return
def _init(self):
super(Actor, self)._init()
return
def actor():
return Actor('test/index')
__date__ = "$Nov 12, 2009 1:26:34 PM$"
|
[
"dexity@gmail.com"
] |
dexity@gmail.com
|
7f75e3a4e524969dd57adf3d766bf1a31c84bf50
|
9709a98a04285d86acad6112bc335e8f2995e9b1
|
/Widgets/RearrangeMod/DialogMod.py
|
dcfb00442446b74f3667febe598df5138812543d
|
[] |
no_license
|
redhog/webwidgets
|
37eb56d92c7421207f78f73961a8b58dc7592ebb
|
cd0094db9f9f1348ab380d7aef40ab23a7b8b1ba
|
refs/heads/master
| 2021-01-17T02:59:45.566859
| 2013-04-06T21:14:34
| 2013-04-06T21:14:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,056
|
py
|
#! /bin/env python
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
# Webwidgets web developement framework
# Copyright (C) 2006 uAnywhere, Egil Moeller <redhog@redhog.org>
# Copyright (C) 2007 Egil Moeller <redhog@redhog.org>
# Copyright (C) 2007 FreeCode AS, Egil Moeller <egil.moller@freecode.no>
# Copyright (C) 2007 FreeCode AS, Axel Liljencrantz <axel.liljencrantz@freecode.no>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Widgets for user input.
"""
import types
import Webwidgets.Utils
import Webwidgets.Constants
import Webwidgets.Widgets.Base
import Webwidgets.Widgets.ApplicationMod.WindowMod
import Webwidgets.Widgets.InputMod.BaseInput
import Webwidgets.Widgets.FormattingMod.BaseFormatting
class InfoFrame(Webwidgets.Widgets.Base.StaticComposite):
def draw_head(self, children, output_options):
if 'Head' not in children:
children['Head'] = children['Body'].title
return """<div class="%(html_head_classes)s" id="%(ww_untranslated__html_id)s-head">
%(Head)s
</div>""" % children
def draw_body(self, children, output_options):
return """<div class="%(html_body_classes)s" id="%(ww_untranslated__html_id)s-body">
%(Body)s
</div>""" % children
def draw_foot(self, children, output_options):
return ""
def draw(self, output_options):
children = self.draw_children(
output_options,
invisible_as_empty = True,
include_attributes = True)
children['html_head_classes'] = Webwidgets.Utils.classes_to_css_classes(self.ww_classes, ['head'])
children['html_body_classes'] = Webwidgets.Utils.classes_to_css_classes(self.ww_classes, ['body'])
children['html_foot_classes'] = Webwidgets.Utils.classes_to_css_classes(self.ww_classes, ['foot'])
children['head'] = self.draw_head(children, output_options)
children['body'] = self.draw_body(children, output_options)
children['foot'] = self.draw_foot(children, output_options)
return """
<div %(html_attributes)s>
%(head)s
%(body)s
%(foot)s
</div>
""" % children
class StaticDialog(InfoFrame):
"""Dialogs provides an easy way to let the user select one of a
few different options, while providing the user with some longer
explanation/description of the options. Options are described
using a dictionary of description-value pairs."""
__wwml_html_override__ = False
buttons = {'Cancel': '0', 'Ok': '1'}
def draw_foot(self, children, output_options):
return """<div class="%(html_foot_classes)s" id="%(ww_untranslated__html_id)s-foot">
%(Buttons)s
</div>""" % children
class Buttons(Webwidgets.Widgets.InputMod.BaseInput.ButtonArray):
def selected(self, path, value):
self.parent.notify('selected', value)
raise StopIteration
class Buttons(object):
def __get__(self, instance, owner):
if not instance.parent: return None
return instance.parent.buttons
buttons = Buttons()
class AbstractDialog(StaticDialog, Webwidgets.Widgets.Base.DirectoryServer):
remove_on_close = False
def draw(self, output_options):
Webwidgets.Widgets.ApplicationMod.WindowMod.HtmlWindow.register_script_link(
self,
self.calculate_url_to_directory_server(
'Webwidgets.Dialog',
['Dialog','dialog_iefix.js'],
output_options))
return StaticDialog.draw(self, output_options)
def close(self):
if self.remove_on_close:
del self.parent[self.name]
else:
self.visible = False
def selected(self, path, value):
if path != self.path: return
self.close()
class Dialog(AbstractDialog):
pass
class AbstractInfoDialog(AbstractDialog):
pass
class InfoDialog(AbstractInfoDialog):
buttons = {'Ok': '1'}
class ConfirmationDialog(AbstractInfoDialog):
class Head(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Really perform action?"""
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Do you really want to perform this action?"""
class DisableConfirmationDialog(ConfirmationDialog):
class Head(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Really disable this item?"""
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Do you really want to disable this item?"""
class DeleteConfirmationDialog(ConfirmationDialog):
class Head(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Really delete this item?"""
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html):
html = """Do you really want to delete this item?"""
class DialogContainer(Webwidgets.Widgets.FormattingMod.BaseFormatting.Div):
is_dialog_container = True
__wwml_html_override__ = False
html = "%(Dialogs)s%(Body)s"
class Dialogs(Webwidgets.Widgets.FormattingMod.BaseFormatting.ReplacedList): pass
class Body(Webwidgets.Widgets.FormattingMod.BaseFormatting.Html): pass
def add_dialog(self, dialog, name = None):
if name is None: name = str(len(self['Dialogs'].children))
self['Dialogs'][name] = dialog
dialog.remove_on_close = True
def add_dialog_to_nearest(cls, widget, dialog, name = None):
widget.get_ansestor_by_attribute(
"is_dialog_container", True
).add_dialog(dialog, name)
add_dialog_to_nearest = classmethod(add_dialog_to_nearest)
class Hide(Webwidgets.Widgets.Base.StaticComposite):
"""
A hide/show widget
Change the value of the title variable to change the text in the button.
TODO:
Implement an alternative javascript implementation for faster
update at the expense of longer reloads
"""
class HideButton(Webwidgets.Widgets.InputMod.BaseInput.ToggleButton):
true_title = "Hide"
false_title = "Show"
def draw(self, path):
self['Child'].visible = self['HideButton'].value
children = self.draw_children(path, invisible_as_empty=True, include_attributes=True)
return """<div %(html_attributes)s>%(HideButton)s %(Child)s</div>""" % children
|
[
"egil.moller@freecode.no"
] |
egil.moller@freecode.no
|
dac6b6d0e619f601a6ec338da6db45412c183f49
|
6191bad7750404bc0bcaec43a8dea51b52980f04
|
/Seção_06/Exercício_51.py
|
0228181ef57c32ca0fe87335b02289edbf78a35c
|
[] |
no_license
|
Lehcs-py/guppe
|
abfbab21c1b158b39251fa6234a4a98ce5f31c2a
|
2ff007bce88e065e6d3020971efd397ec7f7084b
|
refs/heads/main
| 2023-02-26T18:43:06.052699
| 2021-02-07T18:22:53
| 2021-02-07T18:22:53
| 330,180,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
print("""
51. Um funcionário recebe aumento anual. Em 1995 foi contratado por 2000 reais. Em 1996 recebeu aumento de 1.5%.
A partir de 1997, os aumentos sempre correspondem ao dobro do ano anterior. Faça programa que determine o salário atual do funcionário.
""")
salario_variavel = 2000
salario_final = 0
porcento = 0.75
for num in range((2021 - 1995) + 1):
porcento *= 2
salario_final = (salario_variavel + ((salario_variavel / 100) * porcento))
salario_variavel = salario_final
print(f'Salário final: {salario_final}')
|
[
"noreply@github.com"
] |
Lehcs-py.noreply@github.com
|
2193b859ac3796c8cbc52c1b23cf377f2ce0eeb6
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/inference/python_api_test/test_int8_model/base_mkldnn_int8.py
|
36875b65f27555bcf814ad671a8399dc9eea69a8
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,869
|
py
|
"""
mkldnn_int8 base values
"""
mkldnn_int8 = {
"PPYOLOE": {
"model_name": "PPYOLOE",
"jingdu": {
"value": 0.008505799229272469,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 284.9,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PicoDet": {
"model_name": "PicoDet",
"jingdu": {
"value": 0.29576267147717544,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 15.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"YOLOv5s": {
"model_name": "YOLOv5s",
"jingdu": {
"value": 0.337513986405508,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 41.9,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"YOLOv6s": {
"model_name": "YOLOv6s",
"jingdu": {
"value": 0.38167538696759734,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 36.3,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"YOLOv7": {
"model_name": "YOLOv7",
"jingdu": {
"value": 0.4599616751537943,
"unit": "mAP",
"th": 0.05,
},
"xingneng": {
"value": 101.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"ResNet_vd": {
"model_name": "ResNet_vd",
"jingdu": {
"value": 0.78542,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 6.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"MobileNetV3_large": {
"model_name": "MobileNetV3_large",
"jingdu": {
"value": 0.70114,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 4.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PPLCNetV2": {
"model_name": "PPLCNetV2",
"jingdu": {
"value": 0.75986,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 3.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PPHGNet_tiny": {
"model_name": "PPHGNet_tiny",
"jingdu": {
"value": 0.77626,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 8.0,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"EfficientNetB0": {
"model_name": "EfficientNetB0",
"jingdu": {
"value": 0.75366,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 9.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PP-HumanSeg-Lite": {
"model_name": "PP-HumanSeg-Lite",
"jingdu": {
"value": 0.9596980417424789,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 42.2,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"PP-Liteseg": {
"model_name": "PP-Liteseg",
"jingdu": {
"value": 0.6646508698054427,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 375.9,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"HRNet": {
"model_name": "HRNet",
"jingdu": {
"value": 0.7899464457999261,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 532.6,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"UNet": {
"model_name": "UNet",
"jingdu": {
"value": 0.6434970135618086,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 1105.8,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"Deeplabv3-ResNet50": {
"model_name": "Deeplabv3-ResNet50",
"jingdu": {
"value": 0.7900994083314681,
"unit": "mIoU",
"th": 0.05,
},
"xingneng": {
"value": 861.7,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
"ERNIE_3.0-Medium": {
"model_name": "ERNIE_3.0-Medium",
"jingdu": {
"value": 0.6809545875810936,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 102.71,
"unit": "ms",
"batch_size": 32,
"th": 0.05,
},
},
"PP-MiniLM": {
"model_name": "PP-MiniLM",
"jingdu": {
"value": 0.6899907321594069,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 115.12,
"unit": "ms",
"batch_size": 32,
"th": 0.05,
},
},
"BERT_Base": {
"model_name": "BERT_Base",
"jingdu": {
"value": 0.051546658541685234,
"unit": "acc",
"th": 0.05,
},
"xingneng": {
"value": 18.94,
"unit": "ms",
"batch_size": 1,
"th": 0.05,
},
},
}
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
8210ae56f9b2440b7514fbf2b9ffd550ffa01dbd
|
b9481ebae49cf19de3b5718c69b84f1b59a8e421
|
/apps/quotes/migrations/0001_initial.py
|
feb7681a9e81c817dc6330890a45b12644142612
|
[] |
no_license
|
arun-skaria/eracks
|
06db7e3715afa2c6992fe09f05d6546520c65459
|
532d8a2be31199e7b78ca5e29944deb0a1400753
|
refs/heads/master
| 2023-01-08T01:40:10.036585
| 2017-07-13T13:10:42
| 2017-07-13T13:10:42
| 97,123,722
| 0
| 0
| null | 2022-12-26T20:16:17
| 2017-07-13T13:08:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('customers', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quote_number', models.CharField(help_text=b'eRacks quote id - letters/numbers/underscore/dashes ok, no spaces', unique=True, max_length=20)),
('valid_for', models.IntegerField(default=10, help_text=b'Number of days the quote is valid for')),
('purchase_order', models.CharField(help_text=b'Customer Purchase Order number, etc', max_length=20, blank=True)),
('customer_reference', models.CharField(help_text=b'Other customer reference number, RFQ, contact name, etc', max_length=30, blank=True)),
('terms', models.CharField(default=b'ccard', help_text=b'Net 5, Wire Transfer, ccard, etc', max_length=20)),
('discount', models.FloatField(default=0, help_text=b'Dollars or percent, according to type', blank=True)),
('discount_type', models.CharField(default=b'$', max_length=1, blank=True, choices=[(b'$', b'Dollars'), (b'%', b'Percent')])),
('shipping', models.FloatField(help_text=b'Estimated weight - lbs', blank=True)),
('shipping_method', models.CharField(help_text=b'UPS, FedEx, Freight, 3-Day, etc', max_length=40, blank=True)),
('target', models.FloatField(help_text=b"The customer's budget, or where the customer would like the quote to be")),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('approved_by', models.ForeignKey(default=2, to=settings.AUTH_USER_MODEL, help_text=b'Manager or admin person approving quote')),
('customer', models.ForeignKey(blank=True, to='customers.Customer', help_text=b'click "+" to create new', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='QuoteLineItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('model', models.CharField(help_text=b'eRacks Model name, eg "OPTERNATOR", or make one up for custom quotes', max_length=60)),
('quantity', models.IntegerField()),
('description', models.TextField(help_text=b'Start with a line for general description, then one config item per line for components')),
('cost', models.FloatField(help_text=b'our cost')),
('price', models.FloatField(help_text=b'customer price')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('quote', models.ForeignKey(to='quotes.Quote')),
],
options={
},
bases=(models.Model,),
),
]
|
[
"nijap@techversantinfotech.com"
] |
nijap@techversantinfotech.com
|
6a5765f8581e4dd9031765969d6f25ba5aa1ed0b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_butterfly.py
|
59efacbed543d8dcd7c2d0e9cc28ef78436400d2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
#calss header
class _BUTTERFLY():
def __init__(self,):
self.name = "BUTTERFLY"
self.definitions = [u'a type of insect with large, often brightly coloured wings', u'a person who is not responsible or serious, and who is likely to change activities easily or only be interested in pleasure: ', u'the small metal part put on the back of a stud (= piece of jewellery worn in the ear) that keeps it in place', u'a way of swimming on your front by kicking with your legs while raising your arms together out of the water and then bringing them down in front of you ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f5aed57a1d491a9acda13887ef8b3b19ee883a79
|
4ef80242cf22a1ccd0d7a2042476b5b6ac1eb03e
|
/scadparser/commands/cmd_deps.py
|
121d7db81fe8369ef421c06396e918c8d957676c
|
[] |
no_license
|
rblack42/ScadParser
|
71081adb99ec03e78bc78b4101562b7fa1bab134
|
a9cc10b23c6515a53065dfb58b23881d0145f88d
|
refs/heads/master
| 2023-07-11T03:51:53.434534
| 2021-08-27T02:03:37
| 2021-08-27T02:03:37
| 397,718,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from scadparser import __version__
import click
from scadparser.cli import pass_environment
@click.command("deps", help="Display dependency versions.")
@pass_environment
def cli(ctx):
"""Display current dependency versions."""
click.echo(f"scadparser: {__version__}")
|
[
"roie.black@gmail.com"
] |
roie.black@gmail.com
|
90cd71d7b8c6e81838f40845f2a33e8dd698090e
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/yubinbai/pcuva-problems/UVa 10496 - Collecting Beepers/main.py
|
bee7a62d1be3255db3c7266bb98c3130f4d8cc08
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664
| 2016-09-12T12:38:32
| 2016-09-12T12:38:32
| 65,951,766
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
'''
Created on Jul 15, 2013
@author: Yubin Bai
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 31
def solve(par):
N, M, startI, startJ, nBeepers, beepers = par
minCost = [INF]
path = set()
def backtrack(step, i, j, cost):
if cost > minCost:
return
if step == nBeepers:
cost += abs(i - startI) + abs(j - startJ)
minCost[0] = min(minCost[0], cost)
return
for i1, j1 in beepers:
if (i1, j1) not in path:
dist = abs(i1 - i) + abs(j1 - j)
path.add((i1, j1))
backtrack(step + 1, i1, j1, cost + dist)
path.remove((i1, j1))
backtrack(0, startI, startJ, 0)
return 'The shortest path has length %d' % minCost[0]
class Solver:
def getInput(self):
self.numOfTests = int(self.fIn.readline().strip())
self.input = []
for iterTest in range(self.numOfTests):
N, M = map(int, self.fIn.readline().strip().split())
startI, startJ = map(int, self.fIn.readline().strip().split())
nBeepers = int(self.fIn.readline())
beepers = []
for i in range(nBeepers):
beepers.append(tuple(map(int, self.fIn.readline().split())))
self.input.append((N, M, startI, startJ, nBeepers, beepers))
def __init__(self):
self.fIn = open('input.txt')
self.fOut = open('output.txt', 'w')
self.results = []
def parallel(self):
self.getInput()
p = Pool(4)
millis1 = int(round(time.time() * 1000))
self.results = p.map(solve, self.input)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def sequential(self):
self.getInput()
millis1 = int(round(time.time() * 1000))
for i in self.input:
self.results.append(solve(i))
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def makeOutput(self):
for test in range(self.numOfTests):
self.fOut.write("%s\n" % self.results[test])
self.fIn.close()
self.fOut.close()
if __name__ == '__main__':
solver = Solver()
if parallelSolve:
solver.parallel()
else:
solver.sequential()
9
|
[
"xenron@outlook.com"
] |
xenron@outlook.com
|
c2f985cc04425adca0e7b65ce826d18b573f9ce1
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/flair/models/tars_tagger_model.py
|
ab659471cf8a9dab55842904ddabec9cb67d5ac5
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:0d1594783dc93d010b71f1a23fcc38fb70ad5f99f0cf54f87f6b88c38486bc74
size 30192
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
f44282bae37f63740ff1b8c780ad60d944f81ef9
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/cisco/asa/plugins/terminal/asa.py
|
83f339186d356b8421c95976a5f8e5b3f2cf6792
|
[
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-or-later"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 2,532
|
py
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
]
terminal_stderr_re = [
re.compile(br"error:", re.I),
re.compile(br"Removing.* not allowed, it is being used"),
re.compile(br"^Command authorization failed\r?$", re.MULTILINE),
]
def on_open_shell(self):
if self._get_prompt().strip().endswith(b"#"):
self.disable_pager()
def disable_pager(self):
try:
self._exec_cli_command(u"no terminal pager")
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure("unable to disable terminal pager")
def on_become(self, passwd=None):
if self._get_prompt().strip().endswith(b"#"):
return
cmd = {u"command": u"enable"}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u"prompt"] = to_text(
r"[\r\n]?[Pp]assword: $", errors="surrogate_or_strict"
)
cmd[u"answer"] = passwd
try:
self._exec_cli_command(
to_bytes(json.dumps(cmd), errors="surrogate_or_strict")
)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure(
"unable to elevate privilege to enable mode"
)
self.disable_pager()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
1d2133d364a2de163a1af19cd4f8ebc83f4cb579
|
a84dfa25c827a2979a811513ac888288d378b980
|
/OpenCV/list15_8.py
|
314a875e0f200a5038a1a8889d9cceca9c0ed694
|
[] |
no_license
|
sunho-park/study1
|
d49b9d27b0069dbeb7cc31199177f6771a84d3be
|
0386fbea0282c2135407cad608b4ffa84b02d298
|
refs/heads/master
| 2022-12-16T23:17:14.746575
| 2020-09-11T06:04:06
| 2020-09-11T06:04:06
| 264,140,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
import numpy as np
import cv2
img = cv2.imread("./OpenCV/sample.jpg")
size = img.shape
# 이미지를 나타내는 행렬의 일부를 꺼내면 그것이 트리밍이 됩니다.
# n등분하려면 가로와 세로 크기를 나눕니다.
my_img = img[: size[0]//2, size[1]//3]
# 여기에서는 원래의 배율을 유지하면서 폭과 높이를 각각 2배로 합니다. 크기를 지정할 때는 (폭, 높이) 순서라는 점을 유의하세요
my_img = cv2.resize(my_img, (my_img.shape[1]*2, my_img.shape[0]*2))
cv2.imshow("sample", my_img)
cv2.imwrite("list15_8.jpg", my_img)
|
[
"reebox22@hanmail.net"
] |
reebox22@hanmail.net
|
a8dbeb3447ef7323b9161b5593319f0d7991ffac
|
c71af56951d1c661a5819db72da1caccd9130df2
|
/javascript/cpp-libraries/test1/binding.gyp
|
d1876d8625fb7543597db12f5121ca3f738e8656
|
[] |
no_license
|
adrianpoplesanu/personal-work
|
2940a0dc4e4e27e0cc467875bae3fdea27dd0d31
|
adc289ecb72c1c6f98582f3ea9ad4bf2e8e08d29
|
refs/heads/master
| 2023-08-23T06:56:49.363519
| 2023-08-21T17:20:51
| 2023-08-21T17:20:51
| 109,451,981
| 0
| 1
| null | 2022-10-07T04:53:24
| 2017-11-03T23:36:21
|
Python
|
UTF-8
|
Python
| false
| false
| 406
|
gyp
|
{
"targets": [
{
"target_name": "greet",
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"sources": [
"./src/greeting.cpp",
"./src/search.cpp",
"./src/index.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")"
],
'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ],
}
]
}
|
[
"adrian.poplesanu@yahoo.com"
] |
adrian.poplesanu@yahoo.com
|
8039568ad432cafc9c4e614d96115a4addf76f96
|
dd6ee732613966b899df8a514f2907084e433c3f
|
/setup.py
|
4358797461aff42c90277e9271152ad16a5e1ec1
|
[] |
no_license
|
rixx/ramble
|
410588025f0cfae04d75078c5c007a53538b526e
|
f838171517035edfa03c1afacb0bd8cb157eb90a
|
refs/heads/master
| 2023-05-12T17:20:50.744796
| 2023-04-29T13:30:19
| 2023-04-29T13:30:19
| 244,879,387
| 1
| 0
| null | 2022-07-22T05:34:06
| 2020-03-04T11:08:16
|
Python
|
UTF-8
|
Python
| false
| false
| 424
|
py
|
from setuptools import setup
setup(
name="ramble-rixx-de",
author="Tobias Kunze",
author_email="r@rixx.de",
url="https://github.com/rixx/ramble.rixx.de",
packages=["scripts"],
entry_points="""
[console_scripts]
posts=scripts.cli:cli
""",
install_requires=[
"click",
"inquirer==2.6.*",
"python-frontmatter==0.5.*",
"unidecode==1.1.*",
],
)
|
[
"r@rixx.de"
] |
r@rixx.de
|
9b0ec8b0123f758441aa60a40a32b9f3d96346c3
|
b3330bd3365767b89afb9c432f4deb722b39ac1c
|
/python/last_nth_element.py
|
899c425e6c5110fa5844947eec110e1d1acb856e
|
[] |
no_license
|
hguochen/algorithms
|
944df332d5b39220bd59cbd62dc74b12e335fb9e
|
703e71a5cd9e002d800340df879ed475a404d092
|
refs/heads/master
| 2022-02-27T12:11:10.607042
| 2022-02-18T21:04:00
| 2022-02-18T21:04:00
| 13,767,503
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
##################################
### Title: Last Nth element ######
### Author: GuoChen Hou ########
##################################
# Implement an algorithm to find the nth to last element of a
# singly linked list.
from ADT.LinkedList import LinkedList
class NthLinkedList(LinkedList):
def nth_to_last(self, position):
if self.size is 0:
return
# get the node position counting from head
node_position = self.size - position - 1 # offset since node starts at 1 instead of 0
trav = self.head
while trav is not None and node_position is not 0:
trav = trav.next
node_position -= 1
return trav.data
if __name__ == "__main__":
test_list = NthLinkedList()
test_list.insert(1)
test_list.insert(2)
test_list.insert(3)
test_list.insert(4)
test_list.print_list()
print test_list.nth_to_last(2)
|
[
"hguochen@gmail.com"
] |
hguochen@gmail.com
|
e0410615b113f2b713aca3503c38d512f4309812
|
941c1bfd4edf4619c4b66391453abe8994ccc0bc
|
/src/api/admin.py
|
1ca790712ac550adfc72a4853ebdc86d2bb83b80
|
[
"MIT"
] |
permissive
|
websiteinspiration/back-end
|
11a0da9fb1b252557305b56867b1adc82c5da66b
|
e9762149aaa3ce08278e357950b35ac168122d95
|
refs/heads/master
| 2020-06-08T20:39:36.513962
| 2019-06-21T14:38:15
| 2019-06-21T14:38:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
from django.contrib import admin
from api.models import (
CodeSchool,
Location,
Scholarship,
ScholarshipApplication,
TeamMember,
)
@admin.register(Scholarship)
class ScholarshipAdmin(admin.ModelAdmin):
list_display = (
"name",
"location",
"open_time",
"close_time",
"created_at",
"updated_at",
)
@admin.register(ScholarshipApplication)
class ScholarshipApplicationAdmin(admin.ModelAdmin):
list_display = ("user", "scholarship", "terms_accepted", "created_at", "updated_at")
@admin.register(TeamMember)
class TeamMemberAdmin(admin.ModelAdmin):
list_display = ("name", "email", "role", "group", "image_src")
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
list_display = (
"code_school",
"va_accepted",
"address1",
"address2",
"city",
"state",
"zip",
)
@admin.register(CodeSchool)
class CodeSchoolAdmin(admin.ModelAdmin):
list_display = (
"name",
"url",
"full_time",
"hardware_included",
"has_online",
"online_only",
"has_housing",
"mooc",
"is_partner",
"rep_name",
"rep_email",
)
list_filter = (
"full_time",
"hardware_included",
"has_online",
"online_only",
"has_housing",
"mooc",
"is_partner",
)
search_fields = ("name", "rep_name", "rep_email", "url")
|
[
"abanthes@gmail.com"
] |
abanthes@gmail.com
|
486c7da5a8c0c5378fe0a03acb83e39ba404cc7c
|
ad16b0c0178e4543d0c44ad3d90f90c6beeb4f5a
|
/filter_array_by_column.py
|
f45a6bb704a3f3db5b46886e74be352c30600a4b
|
[] |
no_license
|
timmonspatrick/HemoDub
|
09cb61e8e33ee8b64c9e6011d4ae8679d07950d9
|
4e6cceb44456c498cc1d6d55f8369099d0d5d947
|
refs/heads/master
| 2021-04-27T09:34:40.935684
| 2018-05-31T08:29:04
| 2018-05-31T08:29:04
| 122,491,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 2 12:20:31 2018
@author: Patrick
"""
import numpy as np
def filter_array_by_column(X, cut_off=0.95):
n_cols = X.shape[1]
bad_cols = set()
for n in range(n_cols):
Y = X[:,n]
unique, counts = np.unique(Y, return_counts=True)
counts_sum = sum(counts)
counts = [i / counts_sum for i in counts]
if len([i for i in counts if i >= cut_off]) > 0:
bad_cols.add(n)
good_cols = [i for i in range(n_cols) if i not in bad_cols]
X_new = X[:,good_cols]
return X_new
|
[
"timmons.patrick@outlook.com"
] |
timmons.patrick@outlook.com
|
6caaa7467d19c252f251757d6eb6c91863cc3273
|
fb124e51024917d6479fa626d9607ff10f7a3aba
|
/storm-control/storm_control/steve/qtdesigner/steve_ui.py
|
221af7d78ac059e59e624ce48a2a75c603c577dc
|
[
"MIT"
] |
permissive
|
BehnamAbaie/storm-control
|
054bd7bbd903ed9635e4d1121c30544f58473c4f
|
0c686321142eccad62ce3365eae22c3b69229b0d
|
refs/heads/main
| 2023-06-18T08:04:01.108874
| 2021-07-14T00:51:15
| 2021-07-14T00:51:15
| 342,049,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,764
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'steve.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1148, 831)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.mosaicTab = QtWidgets.QWidget()
self.mosaicTab.setObjectName("mosaicTab")
self.tabWidget.addTab(self.mosaicTab, "")
self.sectionsTab = QtWidgets.QWidget()
self.sectionsTab.setObjectName("sectionsTab")
self.tabWidget.addTab(self.sectionsTab, "")
self.horizontalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1148, 22))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuMosaic = QtWidgets.QMenu(self.menubar)
self.menuMosaic.setObjectName("menuMosaic")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionConnect = QtWidgets.QAction(MainWindow)
self.actionConnect.setObjectName("actionConnect")
self.actionDisconnect = QtWidgets.QAction(MainWindow)
self.actionDisconnect.setObjectName("actionDisconnect")
self.actionSave_Positions = QtWidgets.QAction(MainWindow)
self.actionSave_Positions.setObjectName("actionSave_Positions")
self.actionSave_Mosaic = QtWidgets.QAction(MainWindow)
self.actionSave_Mosaic.setObjectName("actionSave_Mosaic")
self.actionSet_Working_Directory = QtWidgets.QAction(MainWindow)
self.actionSet_Working_Directory.setObjectName("actionSet_Working_Directory")
self.actionLoad_Mosaic = QtWidgets.QAction(MainWindow)
self.actionLoad_Mosaic.setObjectName("actionLoad_Mosaic")
self.actionDelete_Images = QtWidgets.QAction(MainWindow)
self.actionDelete_Images.setObjectName("actionDelete_Images")
self.actionLoad_Positions = QtWidgets.QAction(MainWindow)
self.actionLoad_Positions.setObjectName("actionLoad_Positions")
self.actionSave_Snapshot = QtWidgets.QAction(MainWindow)
self.actionSave_Snapshot.setObjectName("actionSave_Snapshot")
self.actionLoad_Movies = QtWidgets.QAction(MainWindow)
self.actionLoad_Movies.setObjectName("actionLoad_Movies")
self.actionLoad_Dax_By_Pattern = QtWidgets.QAction(MainWindow)
self.actionLoad_Dax_By_Pattern.setObjectName("actionLoad_Dax_By_Pattern")
self.actionAdjust_Contrast = QtWidgets.QAction(MainWindow)
self.actionAdjust_Contrast.setObjectName("actionAdjust_Contrast")
self.menuFile.addAction(self.actionSet_Working_Directory)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionDelete_Images)
self.menuFile.addAction(self.actionLoad_Movies)
self.menuFile.addAction(self.actionLoad_Mosaic)
self.menuFile.addAction(self.actionLoad_Positions)
self.menuFile.addAction(self.actionSave_Mosaic)
self.menuFile.addAction(self.actionSave_Positions)
self.menuFile.addAction(self.actionSave_Snapshot)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuMosaic.addAction(self.actionAdjust_Contrast)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuMosaic.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Steve"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.mosaicTab), _translate("MainWindow", "Mosaic"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.sectionsTab), _translate("MainWindow", "Sections"))
self.menuFile.setTitle(_translate("MainWindow", "Fi&le"))
self.menuMosaic.setTitle(_translate("MainWindow", "Mosaic"))
self.actionQuit.setText(_translate("MainWindow", "&Quit (Ctrl+Q)"))
self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
self.actionConnect.setText(_translate("MainWindow", "Connect"))
self.actionDisconnect.setText(_translate("MainWindow", "Disconnect"))
self.actionSave_Positions.setText(_translate("MainWindow", "Sav&e Positions"))
self.actionSave_Positions.setShortcut(_translate("MainWindow", "Ctrl+T"))
self.actionSave_Mosaic.setText(_translate("MainWindow", "Sa&ve Mosaic"))
self.actionSave_Mosaic.setShortcut(_translate("MainWindow", "Ctrl+S"))
self.actionSet_Working_Directory.setText(_translate("MainWindow", "&Set Working Directory"))
self.actionLoad_Mosaic.setText(_translate("MainWindow", "Load &Mosaic"))
self.actionLoad_Mosaic.setShortcut(_translate("MainWindow", "Ctrl+M"))
self.actionDelete_Images.setText(_translate("MainWindow", "&Delete Images"))
self.actionDelete_Images.setShortcut(_translate("MainWindow", "Ctrl+D"))
self.actionLoad_Positions.setText(_translate("MainWindow", "Load &Positions"))
self.actionLoad_Positions.setShortcut(_translate("MainWindow", "Ctrl+P"))
self.actionSave_Snapshot.setText(_translate("MainWindow", "Save S&napshot"))
self.actionSave_Snapshot.setShortcut(_translate("MainWindow", "Ctrl+I"))
self.actionLoad_Movies.setText(_translate("MainWindow", "&Load Movie(s)"))
self.actionLoad_Movies.setShortcut(_translate("MainWindow", "Ctrl+L"))
self.actionLoad_Dax_By_Pattern.setText(_translate("MainWindow", "Load Dax By Pattern"))
self.actionAdjust_Contrast.setText(_translate("MainWindow", "Adjust Contrast"))
|
[
"noreply@github.com"
] |
BehnamAbaie.noreply@github.com
|
e7f9ef879ca1ae30a2bd11327b902db8fc44b076
|
3f5a1ef51620fd8c35ef38064ca5aa00776ab6f4
|
/ds_and_algo_educative/Doubly_LinkedList/Reverse.py
|
80a3dfd6673c3b0a39f02c490c0536c87db82c1b
|
[] |
no_license
|
poojagmahajan/python_exercises
|
1b290a5c0689f703538caf89bca5bc6c1fdb392a
|
65539cf31c5b2ad5768d652ed5fe95054ce5f63f
|
refs/heads/master
| 2022-11-12T03:52:13.533781
| 2020-07-04T20:50:29
| 2020-07-04T20:54:46
| 263,151,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
class DoublyLinkedList:
def __init__(self):
self.head = None
def append(self, data):
if self.head is None:
new_node = Node(data)
new_node.prev = None
self.head = new_node
else:
new_node = Node(data)
cur = self.head
while cur.next:
cur = cur.next
cur.next = new_node
new_node.prev = cur
new_node.next = None
def print_list(self):
cur = self.head
while cur:
print(cur.data)
cur = cur.next
def reverse(self):
tmp = None
cur = self.head
while cur:
tmp = cur.prev
cur.prev = cur.next
cur.next = tmp
cur = cur.prev
if tmp:
self.head = tmp.prev
dllist = DoublyLinkedList()
dllist.append(1)
dllist.append(2)
dllist.append(3)
dllist.append(4)
dllist.print_list()
print("\n Reverse list is:")
dllist.reverse()
dllist.print_list()
|
[
"mahajanpoojag@gmail.com"
] |
mahajanpoojag@gmail.com
|
d1cece45846995c7aadd790e8a6b01fc5cea7f56
|
7dc65b6d2e857c807bd2f75e2586af5f8e933fe5
|
/tcutils/parsers/pingparse.py
|
51a28244cc66ce56730f37eb0238cc8ccf493219
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
vkolli/contrail-test-perf
|
d6fdc20f4a2004066c5a6316afd915ecdc9366c2
|
db04b8924a2c330baabe3059788b149d957a7d67
|
refs/heads/master
| 2021-01-18T15:36:18.120487
| 2017-03-30T19:19:30
| 2017-03-30T19:19:30
| 86,661,522
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
"parser to parse the ping output."""
import re
class PingParser(object):
"""Parser to parse the ping output."""
def __init__(self, output):
self.output = output
self.parsed_output = {}
self.parse()
def parse(self):
match = re.search(
"rtt\s+(min/avg/max/mdev)\s+=\s+(\d+.\d+/\d+.\d+/\d+.\d+/\d+.\d+)\s+(\w+)", self.output)
output_req = []
output_req.append(match.group(1))
output_req.append(match.group(2))
self.parsed_output = dict(
zip(output_req[0].split('/'), output_req[1].split('/')))
self.parsed_output['unit'] = match.group(3)
def get_ping_latency(self):
ping_output=self.parsed_output['avg']+" "+self.parsed_output['unit']
return ping_output
|
[
"root@5b3s45.contrail.juniper.net"
] |
root@5b3s45.contrail.juniper.net
|
cef019d360cd07dab58312341d87ea996f7a6c32
|
abc1a497c41ddd8669c8c41da18af65d08ca54e4
|
/Analysis2gamma/fit/admin.py
|
230d2f8df1a1fe4c5666176d8b51793ebc0ade35
|
[] |
no_license
|
gerakolt/direxeno_privet
|
fcef5e3b654720e277c48935acc168472dfd8ecc
|
75e88fb1ed44fce32fce02677f64106121259f6d
|
refs/heads/master
| 2022-12-20T22:01:30.825891
| 2020-10-04T06:01:07
| 2020-10-04T06:01:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
import multiprocessing
import numpy as np
import sys
import time
from memory_profiler import profile
# Rec=np.recarray(5000, dtype=[
# ('Q', 'f8', len(pmts)),
# ('T', 'f8', len(pmts)),
# ('St', 'f8', len(pmts)),
# ('mu', 'f8', 1),
# ('N', 'f8', 1),
# ('F', 'f8', 1),
# ('Tf', 'f8', 1),
# ('Ts', 'f8', 1),
# ('R', 'f8', 1),
# ('a', 'f8', 1),
# ('eta', 'f8', 1),
# ])
n=6
def make_glob_array(p):
Q=multiprocessing.Array('d', p[:n])
T=multiprocessing.Array('d', p[n:2*n])
St=multiprocessing.Array('d', p[2*n:3*n])
mu=multiprocessing.Array('d', [p[3*n]])
W=multiprocessing.Array('d', [p[3*n+1]])
g=multiprocessing.Array('d', [p[3*n+2]])
F=multiprocessing.Array('d', [p[3*n+3]])
Tf=multiprocessing.Array('d', [p[3*n+4]])
Ts=multiprocessing.Array('d', [p[3*n+5]])
R=multiprocessing.Array('d', [p[3*n+6]])
a=multiprocessing.Array('d', [p[3*n+7]])
return Q, T, St, mu, W, g, F, Tf, Ts, R, a
def make_iter(N, Q, T, St, F, Tf, Ts, R, a, v):
for i in range(len(N)):
np.random.seed(int(i*time.time()%2**32))
yield [Q, T, St, N[i], F, Tf, Ts, R, a, v[i]]
|
[
"gerakolt@gmail.com"
] |
gerakolt@gmail.com
|
4f359ebcc8ffaef000df67e034006f85c9765a5f
|
f0b741f24ccf8bfe9bd1950425d83b6291d21b10
|
/samples/v2/pipeline_with_volume.py
|
47b9099c6e00ae80368dc3da8796a43c37d26cab
|
[
"Apache-2.0"
] |
permissive
|
kubeflow/pipelines
|
e678342b8a325559dec0a6e1e484c525fdcc8ce8
|
3fb199658f68e7debf4906d9ce32a9a307e39243
|
refs/heads/master
| 2023-09-04T11:54:56.449867
| 2023-09-01T19:07:33
| 2023-09-01T19:12:27
| 133,100,880
| 3,434
| 1,675
|
Apache-2.0
| 2023-09-14T20:19:06
| 2018-05-12T00:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
# Copyright 2023 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline with volume creation, mount and deletion in v2 engine pipeline."""
from kfp import dsl
from kfp import kubernetes
@dsl.component
def producer() -> str:
with open('/data/file.txt', 'w') as file:
file.write('Hello world')
with open('/data/file.txt', 'r') as file:
content = file.read()
print(content)
return content
@dsl.component
def consumer() -> str:
with open('/data/file.txt', 'r') as file:
content = file.read()
print(content)
return content
@dsl.pipeline
def pipeline_with_volume():
pvc1 = kubernetes.CreatePVC(
pvc_name_suffix='-my-pvc',
access_modes=['ReadWriteOnce'],
size='5Mi',
storage_class_name='standard',
)
task1 = producer()
task2 = consumer().after(task1)
kubernetes.mount_pvc(
task1,
pvc_name=pvc1.outputs['name'],
mount_path='/data',
)
kubernetes.mount_pvc(
task2,
pvc_name=pvc1.outputs['name'],
mount_path='/data',
)
delete_pvc1 = kubernetes.DeletePVC(
pvc_name=pvc1.outputs['name']).after(task2)
if __name__ == '__main__':
# execute only if run as a script
compiler.Compiler().compile(
pipeline_func=pipeline_with_volume,
package_path='pipeline_with_volume.json')
|
[
"noreply@github.com"
] |
kubeflow.noreply@github.com
|
505d724b709f9a90e03e85a4ff7a185472bcbe00
|
2c22736309a50968896b4724df4a7a1d1a150d88
|
/0x0F-python-object_relational_mapping/12-model_state_update_id_2.py
|
0c0445e0c1b773c8f23caefc87d4363ffd43d9b0
|
[] |
no_license
|
gcifuentess/holbertonschool-higher_level_programming
|
ce9f263c0eef07facc1e02b719a8ae7193233d6d
|
75e405ec7f1aa9138aa54e86f7b41aa08ead7f2a
|
refs/heads/master
| 2023-06-18T08:36:22.580908
| 2021-07-18T20:46:40
| 2021-07-18T20:46:40
| 291,871,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
#!/usr/bin/python3
'''
Query with SQLAlchemy, changes the name of a State object
from the database hbtn_0e_6_usa
'''
from sys import argv
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'
.format(argv[1], argv[2], argv[3]),
encoding='utf-8', pool_pre_ping=True)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
to_update = session.query(State).filter_by(id=2).first()
to_update.name = "New Mexico"
session.commit()
session.close()
|
[
"1795@holbertonschool.com"
] |
1795@holbertonschool.com
|
036fd1629ac4f8c3c9ece01c9b37124bd5d8a92b
|
759f52976ad2cd9236da561ca254e11e08003487
|
/part5/ex32/proc/guess_number_core.py
|
1cfa8a70b0f502de992f5608518efcd3bacb1b16
|
[] |
no_license
|
mbaeumer/fiftyseven
|
57b571c3e09640a2ab0ed41e5d06643c12b48001
|
d79b603d5b37bf1f4127d9253f8526ea3897dc08
|
refs/heads/master
| 2020-06-10T20:52:25.311992
| 2017-11-15T18:28:38
| 2017-11-15T18:28:38
| 75,877,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
#!/usr/bin/python
import random
from level import Difficulty
from comparison_result import ComparisonResult
def generate_my_number(difficulty_level):
max = 10
if difficulty_level == Difficulty.MEDIUM:
max = 50
elif difficulty_level == Difficulty.HARD:
max = 100
number = random.randint(0,max)
return number
# get the user's input as string
def get_user_guess(attempts):
message = "What is your next guess: "
if attempts == 1:
message = "Make the first guess: "
guess = input(message)
return guess
def get_difficulty_level():
choice = 0
while choice < 1 or choice > 3:
print("Select difficulty")
print("Easy \t - 1")
print("Medium \t - 2")
print("Hard \t - 3")
try:
choice = int(input("Your choice: "))
except ValueError:
print("ERROR: Please enter a valid choice!")
choice = 0
difficulty_level = Difficulty(choice)
print(difficulty_level)
return difficulty_level
def get_play_again():
user_input = ''
while user_input != 'y' and user_input != 'n':
user_input = input("Play again? ")
return user_input == 'y'
def isValidGuess(guess_as_string):
try:
guess = int(guess_as_string)
return True
except ValueError:
return False
def validateGuess(guess, my_number):
if my_number > guess:
return ComparisonResult.HIGHER
elif my_number < guess:
return ComparisonResult.LOWER
return ComparisonResult.EQUAL
def get_validation_message(comparison_result):
message = "You got it"
if comparison_result == ComparisonResult.HIGHER:
message = "The number is higher"
elif comparison_result == ComparisonResult.LOWER:
message = "The number is lower"
return message
|
[
"martin.baeumer@gmail.com"
] |
martin.baeumer@gmail.com
|
12a2a28e54708661a3440be67a2a67a961697c4d
|
fb98249ee3dece1f3ec5a7b2ba541a5ca07d170b
|
/python/developer.usr.py
|
86e797cfd9a3b14372b77b83c10b4ecd3db8ec43
|
[] |
no_license
|
polikashechkin/developer
|
5d4f50783c96fbe2078423ff689d91ab47408f05
|
f9c6193440457ba4e78d4b5430a1d21c34cd9af1
|
refs/heads/main
| 2023-02-10T03:39:40.175655
| 2021-01-04T23:30:09
| 2021-01-04T23:30:09
| 326,832,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,115
|
py
|
#!/usr/bin/python3.6
import sys, os, shutil, json
#os.chdir(os.dirname(__file__))
from domino.core import log, Version, Server, ProductInfo, VersionInfo, DOMINO_ROOT
from domino.jobs import Job, Задача
from domino.globalstore import GlobalStore
from domino.cli import Console, print_error, print_comment, print_warning, print_help
def arg(n):
try:
return sys.argv[n].lower()
except:
return None
class PRODUCT:
@staticmethod
def create(product):
draft = max(Server.get_drafts('domino'))
product_folder = Server.product_folder(product)
version_folder = Server.version_folder(product, draft)
os.makedirs(version_folder, exist_ok=True)
os.makedirs(os.path.join(version_folder, 'web'), exist_ok=True)
os.makedirs(os.path.join(version_folder, 'python'), exist_ok=True)
with open(os.path.join(product_folder, 'info.json'), 'w') as f:
json.dump({'id':product},f)
info = VersionInfo()
info.version = draft
info.product = product
info.write(version_folder)
#manifest = {'', 'products' : []}
def help():
print(os.path.abspath(__file__))
print('')
print('product.create\tСоздание нового продукта')
print('')
def последняя_версия_продукта(product):
последняя_версия = None
for name in os.listdir(f'/DOMINO/products/{product}'):
try:
версия = Version.parse(name)
if версия is not None and версия.is_draft:
if последняя_версия is None or последняя_версия < версия:
последняя_версия = версия
except:
pass
return последняя_версия
def hard_link(file_name, common_folder, product_folder):
common_file = os.path.join(common_folder, file_name)
product_file = os.path.join(product_folder, file_name)
if not os.path.isfile(common_file):
print_warning(f'Нет файла "{common_file}"')
return
os.makedirs(os.path.dirname(product_file), exist_ok=True)
if os.path.isfile(product_file):
os.remove(product_file)
os.link(common_file, product_file)
print_help(file_name)
print_comment(f'{common_file} => {product_file}')
def copy_folder(product, draft, folder):
draft = f'{draft}'
product_dir = os.path.join(DOMINO_ROOT, 'products', product, f'{draft}', 'python', 'domino', folder)
common_dir = os.path.join(DOMINO_ROOT,'products','_system','python', folder)
for name0 in os.listdir(common_dir):
dir0 = os.path.join(common_dir, name0)
if os.path.isdir(dir0):
for name1 in os.listdir(dir0):
dir1 = os.path.join(dir0, name1)
if os.path.isdir(dir1):
for name2 in os.listdir(dir1):
dir2 = os.path.join(dir1, name2)
if os.path.isdir(dir2):
pass
else:
hard_link(os.path.join(name0, name1, name2), common_dir, product_dir)
else:
hard_link(os.path.join(name0, name1), common_dir, product_dir)
else:
hard_link(name0, common_dir, product_dir)
if __name__ == "__main__":
dir = os.path.dirname(os.path.abspath(__file__))
gs = GlobalStore()
action = arg(1)
if action is None:
help()
elif action == 'create_next':
product = arg(2)
#draft = max(Server.get_drafts(product), default = None)
draft = последняя_версия_продукта(product)
if draft is None:
raise Exception(f'Не найдено последней версии продукта "{product}"')
#if draft is None:
# print (f'Не найдено рабочего макета для "{product}"')
proc = os.path.join(dir, 'create_next_version.py')
os.system(f'python3.6 {proc} {product} {draft}')
elif action == 'download':
path = arg(2)
file = arg(3)
if file is None:
file = os.path.basename(path)
gs.download(path, file)
elif action == 'upload':
gs.upload(arg(2), arg(3))
elif action == 'upload_distro':
gs.upload_distro(arg(2), arg(3), arg(4))
elif action == 'listdir':
for name in gs.listdir(arg(2)):
print(name)
elif action == 'get_versions':
for version in gs.get_versions(arg(2)):
print(version.id)
elif action == 'download_distro':
gs.download_distro(arg(2), arg(3), arg(4))
elif action == 'include':
c = Console()
product = arg(2)
if product is None:
print_warning('Формат вызова: domino include <продукт> <модуль>')
print_warning(' <модуль> := <имя модуля> | domino | templates | exists > ')
sys.exit()
module = arg(3)
if module is None:
c.error(f'Не задан модуль')
sys.exit()
else:
print_comment(f'{module}')
product_draft = max(Server.get_drafts(product), default = None)
if product_draft is None:
c.error(f'Нет макета для "{product}"')
print_comment(f'{product}.{product_draft}')
filenames = []
# определение директорй
if module == 'templates':
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/templates'
common_folder = '/DOMINO/products/_system/python/templates'
# определение списка файлов
for filename in os.listdir(common_folder):
filenames.append(filename)
elif module == 'tables':
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/domino/tables'
common_folder = '/DOMINO/products/_system/python/tables'
for database_name in os.listdir(common_folder):
database_folder = os.path.join(common_folder, database_name)
if os.path.isdir(database_folder):
for table_name in os.listdir(database_folder):
hard_link(os.path.join(database_name, table_name), common_folder, product_folder)
elif module in ['components', 'responses', 'pages', 'databases', 'dicts', 'enums']:
copy_folder(product, product_draft, module)
elif module == 'all':
for module in ['components', 'responses', 'pages', 'databases', 'dicts', 'enums', 'tables']:
copy_folder(product, product_draft, module)
elif module == 'domino':
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/domino'
common_folder = '/DOMINO/products/_system/python/domino'
for filename in os.listdir(common_folder):
filenames.append(filename)
else:
product_folder = f'/DOMINO/products/{product}/{product_draft}/python/domino'
common_folder = '/DOMINO/products/_system/python/domino'
# определение списка файлов
if module == 'exists':
filenames = os.listdir(product_folder)
else:
for filename in os.listdir(common_folder):
#print(filename)
if filename.startswith(module):
filenames.append(filename)
for address, dirs, files in os.walk(common_folder):
print('------------')
print(address, dirs, files)
#print(f'From "{common_folder}"')
#print(f'To "{product_folder}"')
#print(f'{filenames}')
#print(f'{filenames}')
for filename in filenames:
hard_link(filename, common_folder, product_folder)
else:
print(f'Неизвестная команда {action}')
|
[
"polikash@gmail.com"
] |
polikash@gmail.com
|
8ea7cda1c1ee33ae5c76fa40f4c8bb4b8f4314c3
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/cloudformation/checks/resource/aws/test_AmazonMQBrokerPublicAccess.py
|
37710ba33578e981c9f2a0828d48921faa7ce386
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
import os
import unittest
from checkov.cloudformation.checks.resource.aws.AmazonMQBrokerPublicAccess import check
from checkov.cloudformation.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestAmazonMQBrokerPublicAccess(unittest.TestCase):
    """Verifies the AmazonMQBrokerPublicAccess check against the CFN fixtures."""

    def test_summary(self):
        # Fixtures live next to this test file.
        fixtures = os.path.dirname(os.path.realpath(__file__)) + "/example_AmazonMQBrokerPublicAccess"
        report = Runner().run(root_folder=fixtures,
                              runner_filter=RunnerFilter(checks=[check.id]))
        summary = report.get_summary()

        passing_resources = {
            "AWS::AmazonMQ::Broker.PrivateBroker0",
            "AWS::AmazonMQ::Broker.PrivateBroker1",
        }
        failing_resources = {
            "AWS::AmazonMQ::Broker.PublicBroker0",
            "AWS::AmazonMQ::Broker.PublicBroker1",
        }

        self.assertEqual(summary['passed'], 2)
        self.assertEqual(summary['failed'], 2)
        self.assertEqual(summary['skipped'], 0)
        self.assertEqual(summary['parsing_errors'], 0)
        self.assertEqual(passing_resources, {c.resource for c in report.passed_checks})
        self.assertEqual(failing_resources, {c.resource for c in report.failed_checks})
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
bridgecrewio.noreply@github.com
|
7f41a27952caaadddc38b8c99ca3115a8f56128a
|
cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101
|
/st2common/st2common/persistence/trigger.py
|
9a01acf09952ed40a2b9784989224410b6b9a9bd
|
[
"Apache-2.0"
] |
permissive
|
Junsheng-Wu/st2
|
6451808da7de84798641882ca202c3d1688f8ba8
|
c3cdf657f7008095f3c68b4132b9fe76d2f52d81
|
refs/heads/master
| 2022-04-30T21:32:44.039258
| 2020-03-03T07:03:57
| 2020-03-03T07:03:57
| 244,301,363
| 0
| 0
|
Apache-2.0
| 2022-03-29T22:04:26
| 2020-03-02T06:53:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,200
|
py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common import log as logging
from st2common import transport
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.db.trigger import triggertype_access, trigger_access, triggerinstance_access
from st2common.persistence.base import (Access, ContentPackResource)
from st2common.transport import utils as transport_utils
LOG = logging.getLogger(__name__)
class TriggerType(ContentPackResource):
    """Persistence facade for TriggerType documents (see models.db.trigger)."""

    # Mongoengine access object this class delegates all DB operations to.
    impl = triggertype_access

    @classmethod
    def _get_impl(cls):
        return cls.impl
class Trigger(ContentPackResource):
    """Persistence facade for Trigger documents.

    Also publishes delete events and dispatches internal triggers on the
    message bus when a trigger is actually removed.
    """

    impl = trigger_access
    # Lazily-created CUD publisher, shared class-wide (see _get_publisher).
    publisher = None

    @classmethod
    def _get_impl(cls):
        return cls.impl

    @classmethod
    def _get_publisher(cls):
        # Create the publisher on first use so importing this module does not
        # require a live message bus connection.
        if not cls.publisher:
            cls.publisher = transport.reactor.TriggerCUDPublisher(
                urls=transport_utils.get_messaging_urls())
        return cls.publisher

    @classmethod
    def delete_if_unreferenced(cls, model_object, publish=True, dispatch_trigger=True):
        """Delete the trigger only if its ref_count has dropped to zero.

        Returns model_object regardless of whether the delete happened.
        """
        # Found in the innards of mongoengine.
        # e.g. {'pk': ObjectId('5609e91832ed356d04a93cc0')}
        delete_query = model_object._object_key
        delete_query['ref_count__lte'] = 0
        cls._get_impl().delete_by_query(**delete_query)
        # Since delete_by_query cannot tell if the delete actually happened, check with a get call
        # if the trigger was deleted. Unfortunately, this opens up to races on delete.
        confirmed_delete = False
        try:
            cls.get_by_id(model_object.id)
        except (StackStormDBObjectNotFoundError, ValueError):
            confirmed_delete = True
        # Publish internal event on the message bus
        if confirmed_delete and publish:
            try:
                cls.publish_delete(model_object)
            except Exception:
                LOG.exception('Publish failed.')
        # Dispatch trigger
        if confirmed_delete and dispatch_trigger:
            try:
                cls.dispatch_delete_trigger(model_object)
            except Exception:
                LOG.exception('Trigger dispatch failed.')
        return model_object
class TriggerInstance(Access):
    """Persistence facade for TriggerInstance documents."""

    impl = triggerinstance_access

    @classmethod
    def _get_impl(cls):
        return cls.impl

    @classmethod
    def delete_by_query(cls, *args, **query):
        """Delete all trigger instances matching the given query."""
        return cls._get_impl().delete_by_query(*args, **query)
|
[
"wei.ying@easystack.cn"
] |
wei.ying@easystack.cn
|
88a459054fe086e6f2a96d15ab8e887f827114b3
|
1180c0bfe29959d95f3c131e6e839950e528d4ee
|
/42/pgmilenkov/regex.py
|
db3e891349a6a6ed5fab6a8521b5513867ebd992
|
[] |
no_license
|
pybites/challenges
|
e3e461accd8e7f890aee8007ba5070086ef983fc
|
02b77652d0901e6e06cb9b1e7cb3e59c675445c2
|
refs/heads/community
| 2023-08-20T18:19:02.982214
| 2022-11-17T09:23:31
| 2022-11-17T09:23:31
| 78,264,928
| 764
| 3,115
| null | 2023-07-21T05:58:19
| 2017-01-07T07:17:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
import re
def extract_course_times():
    """Return every mm:ss timestamp appearing in the course outline."""
    course_outline = ('Introduction 1 Lecture 01:47'
                      'The Basics 4 Lectures 32:03'
                      'Getting Technical! 4 Lectures 41:51'
                      'Challenge 2 Lectures 27:48'
                      'Afterword 1 Lecture 05:02')
    timestamp = re.compile(r'\d{2}:\d{2}')
    return timestamp.findall(course_outline)
def split_on_multiple_chars():
    """Split the log line on ';', ',' or '.' with at most 3 splits, so the
    trailing '...' is left intact and the result has length 4."""
    logline = ('2017-11-03T01:00:02;challenge time,regex!.'
               'hope you join ... soon')
    separator = re.compile(r';|,|\.')
    return separator.split(logline, maxsplit=3)
def get_all_hashtags_and_links():
    """Extract the URL and the two hashtags from the sample tweet, in order."""
    tweet = ('New PyBites article: Module of the Week - Requests-cache '
             'for Repeated API Calls - http://pybit.es/requests-cache.html '
             '#python #APIs')
    url_or_hashtag = re.compile(r'(http\S+|#\S+)')
    return url_or_hashtag.findall(tweet)
def match_first_paragraph():
    """Return the text of the first <p> element, tags stripped."""
    html = ('<p>pybites != greedy</p>'
            '<p>not the same can be said REgarding ...</p>')
    first_para = re.compile(r'<p>(.*?)</p>.*')
    return first_para.sub(r'\1', html)
def find_double_words():
    """Return the ``re.Match`` for the first immediately-repeated word.

    For the sample sentence the match text is ``'the the'`` and group(1)
    is the repeated word itself (``'the'``).

    Fixes vs. the original: the rebuilt de-duplicated ``text`` was computed
    and then discarded before ``return`` (dead code, removed), and the
    comment claiming ``+`` means "zero or more" was wrong (that is ``*``).
    """
    text = 'Spain is so nice in the the spring'
    # re.VERBOSE lets the pattern carry whitespace and inline comments.
    return re.search(r'''
        \b              # start of word
        ([a-zA-Z0-9]+)  # group 1: one or more letters/digits
        \s+             # whitespace between the repeats
        \1              # the same word again (backreference)
        \b              # end of word
        ''', text, re.VERBOSE)
def match_ip_v4_address(ip):
    """Match a dotted-quad IPv4-looking string at the start of ``ip``
    (no range validation, per the challenge)."""
    dotted_quad = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
    return dotted_quad.match(ip)
if __name__ == '__main__':
print(match_first_paragraph())
|
[
"pybites@projects.bobbelderbos.com"
] |
pybites@projects.bobbelderbos.com
|
00c4adf04fdb31e76a8271a3d839b907cf5d21fd
|
a508ffe0942f75721d4623fcda9e57808f93f07d
|
/input_test/s.py
|
a5aa5c9bcccbb47b880e6950bb649080e34c2a96
|
[] |
no_license
|
ag8/magic
|
3a14a81f3c06fa67cd77de07045ee3dc3899ca7f
|
2768fc7490e6cc55b522be68926ad24d3caa939c
|
refs/heads/master
| 2021-01-22T06:49:29.561849
| 2017-10-30T23:34:57
| 2017-10-30T23:34:57
| 102,300,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
import pickle

# Pickle data is binary: open with 'rb' so pickle.load receives bytes.
# The original opened in text mode, which fails on Python 3 and can
# corrupt reads on platforms with newline translation.
with open('/data/affinity/2d/overlap_micro/OVERLAP_AREAS', 'rb') as fp:
    overlap_areas = pickle.load(fp)

print(overlap_areas)
|
[
"andrew2000g@gmail.com"
] |
andrew2000g@gmail.com
|
2fb669fc4597c7fbbb01eb650d16264493c9fb0f
|
33c1c5d0f48ad952776fe546a85350a441d6cfc2
|
/ABC/102/D.py
|
65484cf137a326b30baf30c052b7b04eae161ddc
|
[] |
no_license
|
hisyatokaku/Competition
|
985feb14aad73fda94804bb1145e7537b057e306
|
fdbf045a59eccb1b2502b018cab01810de4ea894
|
refs/heads/master
| 2021-06-30T18:48:48.256652
| 2020-11-16T11:55:12
| 2020-11-16T11:55:12
| 191,138,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,310
|
py
|
import math,string,itertools,fractions,heapq,collections,re,array,bisect,sys,random,time,copy,functools
from collections import deque
sys.setrecursionlimit(10**7)
inf = 10**20
mod = 10**9 + 7
DR = [1, -1, 0, 0]
DC = [0, 0, 1, -1]
# One-line stdin readers (competitive-programming boilerplate).
def LI(): return [int(x) for x in sys.stdin.readline().split()]  # line -> list of ints
def LI_(): return [int(x)-1 for x in sys.stdin.readline().split()]  # ints shifted to 0-based
def LF(): return [float(x) for x in sys.stdin.readline().split()]  # line -> list of floats
def LS(): return sys.stdin.readline().split()  # line -> list of whitespace-split tokens
def I(): return int(sys.stdin.readline())  # line -> single int
def F(): return float(sys.stdin.readline())  # line -> single float
def S(): return input()  # line -> raw string (via input())
def main():
    # Split A into four contiguous non-empty parts and minimise
    # (largest part sum) - (smallest part sum).
    N = I()
    A = LI()
    # cumsum[i] = sum of A[0:i]
    cumsum = [0 for _ in range(N+1)]
    for i in range(N):
        cumsum[i+1] = cumsum[i] + A[i]

    def get_cumsum(a, b):
        # get cumsum from [a, b] element (inclusive on both ends)
        return cumsum[b+1] - cumsum[a]

    def separate_opt_sum(l, r, offset):
        # Split A[l..r] into two contiguous halves with sums as close as
        # possible.  `offset` is the prefix sum before index l, so `targ`
        # is the absolute cumsum value of the ideal split point; bisect
        # finds where that value would land in the cumsum array.
        tot_sum = get_cumsum(l, r)
        targ = offset + tot_sum / 2
        left_i = bisect.bisect_left(cumsum, targ)
        # Evaluate up to three candidate cut positions around left_i;
        # inf marks a candidate that is out of range.
        diff1 = inf
        diff2 = inf
        diff3 = inf
        left_sum1, right_sum1, left_sum2, right_sum2 = inf, inf, inf, inf
        left_sum3, right_sum3 = inf, inf
        if l <= left_i - 2:
            left_sum3 = get_cumsum(l, left_i - 2)
            right_sum3 = tot_sum - left_sum3
            diff3 = abs(right_sum3 - left_sum3)
        if l <= left_i - 1:
            left_sum1 = get_cumsum(l, left_i - 1)
            right_sum1 = tot_sum - left_sum1
            diff1 = abs(right_sum1 - left_sum1)
        if left_i < r:
            left_sum2 = get_cumsum(l, left_i)
            right_sum2 = tot_sum - left_sum2
            diff2 = abs(right_sum2 - left_sum2)
        # Return the (left, right) pair with the smallest imbalance.
        if min(diff1, diff2, diff3) == diff1:
            return left_sum1, right_sum1
        elif min(diff1, diff2, diff3) == diff2:
            return left_sum2, right_sum2
        return left_sum3, right_sum3

    def _separate_opt_sum(l, r):
        # find arr1, arr2 s.t. |arr1 - arr2| = min
        # arr1 = get_cumsum(l, k), arr2 = get_cumsum(k+1, r)
        # NOTE(review): earlier binary-search attempt; never called below.
        # Kept as in the original submission.
        tot_sum = get_cumsum(l, r)
        m = (l + r) // 2
        m_min = l - 1
        m_max = r + 1
        cur_min_diff = abs(2 * get_cumsum(l, m) - tot_sum)
        cur_min_m = m
        while m_min < m and m < m_max:
            left_sum = get_cumsum(l, m)
            right_sum = tot_sum - left_sum
            cur_diff = abs(left_sum - right_sum)
            if cur_diff < cur_min_diff:
                cur_min_diff = cur_diff
                cur_min_m = m
            if left_sum < right_sum:
                m_min = m
                m = (m + r) // 2
            elif left_sum > right_sum:
                m_max = m
                m = (l + m) // 2
            else:
                break
        l_ans = get_cumsum(l, cur_min_m)
        r_ans = get_cumsum(cur_min_m + 1, r)
        return l_ans, r_ans

    ans = inf
    # Try every middle cut `sep` (between part 2 and part 3); split each
    # side optimally into two and track the best max-min spread.
    for sep in range(1, N-2):
        left_S = get_cumsum(0, sep)
        right_S = get_cumsum(sep+1, N-1)
        # import pdb
        # pdb.set_trace()
        p, q = separate_opt_sum(0, sep, 0)
        r, s = separate_opt_sum(sep+1, N-1, left_S)
        # print('sep:', sep, ' ', p, q, r, s)
        # print('\tleft_S:', left_S, ' right_S:', right_S)
        ans = min(ans, max(p, q, r, s) - min(p, q, r, s))
    print(ans)

main()
|
[
"hisyatokaku2005@yahoo.co.jp"
] |
hisyatokaku2005@yahoo.co.jp
|
bcd820a2e7d5a5a1cc617759d8a7456ea44b3f69
|
7e8cee08e8a583cfcefbf86f9272a65bca4dd2e4
|
/Test/Wx/GridCustEditor.py
|
90505b7730f05f209dc030190c349d8991dff6f1
|
[] |
no_license
|
PREM1980/ecomstore
|
01adb86b8423100421806097a518df08ab30c4c8
|
0a01e1826699c8656fdb2502741f8b638948a6e4
|
refs/heads/master
| 2016-09-05T16:58:30.396618
| 2013-05-02T23:15:35
| 2013-05-02T23:15:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,967
|
py
|
import string
import wx
import wx.grid as gridlib
#---------------------------------------------------------------------------
class MyCellEditor(gridlib.PyGridCellEditor):
    """
    This is a sample GridCellEditor that shows you how to make your own custom
    grid editors. All the methods that can be overridden are shown here. The
    ones that must be overridden are marked with "*Must Override*" in the
    docstring.
    """
    def __init__(self, log):
        self.log = log
        self.log.write("MyCellEditor ctor\n")
        gridlib.PyGridCellEditor.__init__(self)

    def Create(self, parent, id, evtHandler):
        """
        Called to create the control, which must derive from wx.Control.
        *Must Override*
        """
        self.log.write("MyCellEditor: Create\n")
        self._tc = wx.TextCtrl(parent, id, "")
        self._tc.SetInsertionPoint(0)
        self.SetControl(self._tc)
        if evtHandler:
            self._tc.PushEventHandler(evtHandler)

    def SetSize(self, rect):
        """
        Called to position/size the edit control within the cell rectangle.
        If you don't fill the cell (the rect) then be sure to override
        PaintBackground and do something meaningful there.
        """
        self.log.write("MyCellEditor: SetSize %s\n" % rect)
        self._tc.SetDimensions(rect.x, rect.y, rect.width+2, rect.height+2,
                               wx.SIZE_ALLOW_MINUS_ONE)

    def Show(self, show, attr):
        """
        Show or hide the edit control. You can use the attr (if not None)
        to set colours or fonts for the control.
        """
        self.log.write("MyCellEditor: Show(self, %s, %s)\n" % (show, attr))
        super(MyCellEditor, self).Show(show, attr)

    def PaintBackground(self, rect, attr):
        """
        Draws the part of the cell not occupied by the edit control. The
        base class version just fills it with background colour from the
        attribute. In this class the edit control fills the whole cell so
        don't do anything at all in order to reduce flicker.
        """
        self.log.write("MyCellEditor: PaintBackground\n")

    def BeginEdit(self, row, col, grid):
        """
        Fetch the value from the table and prepare the edit control
        to begin editing. Set the focus to the edit control.
        *Must Override*
        """
        self.log.write("MyCellEditor: BeginEdit (%d,%d)\n" % (row, col))
        self.startValue = grid.GetTable().GetValue(row, col)
        self._tc.SetValue(self.startValue)
        self._tc.SetInsertionPointEnd()
        self._tc.SetFocus()
        # For this example, select the text
        self._tc.SetSelection(0, self._tc.GetLastPosition())

    def EndEdit(self, row, col, grid):
        """
        Complete the editing of the current cell. Returns True if the value
        has changed. If necessary, the control may be destroyed.
        *Must Override*
        """
        self.log.write("MyCellEditor: EndEdit (%d,%d)\n" % (row, col))
        changed = False
        val = self._tc.GetValue()
        if val != self.startValue:
            changed = True
            grid.GetTable().SetValue(row, col, val) # update the table
        self.startValue = ''
        self._tc.SetValue('')
        return changed

    def Reset(self):
        """
        Reset the value in the control back to its starting value.
        *Must Override*
        """
        self.log.write("MyCellEditor: Reset\n")
        self._tc.SetValue(self.startValue)
        self._tc.SetInsertionPointEnd()

    def IsAcceptedKey(self, evt):
        """
        Return True to allow the given key to start editing: the base class
        version only checks that the event has no modifiers. F2 is special
        and will always start the editor.
        """
        self.log.write("MyCellEditor: IsAcceptedKey: %d\n" % (evt.GetKeyCode()))
        ## We can ask the base class to do it
        #return super(MyCellEditor, self).IsAcceptedKey(evt)
        # or do it ourselves
        return (not (evt.ControlDown() or evt.AltDown()) and
                evt.GetKeyCode() != wx.WXK_SHIFT)

    def StartingKey(self, evt):
        """
        If the editor is enabled by pressing keys on the grid, this will be
        called to let the editor do something about that first key if desired.
        """
        self.log.write("MyCellEditor: StartingKey %d\n" % evt.GetKeyCode())
        key = evt.GetKeyCode()
        ch = None
        if key in [ wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
                    wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7,
                    wx.WXK_NUMPAD8, wx.WXK_NUMPAD9
                    ]:
            # Map numpad key codes onto their digit characters.
            # (fixed: was an accidental duplicated assignment "ch = ch = chr(...)")
            ch = chr(ord('0') + key - wx.WXK_NUMPAD0)

        elif key < 256 and key >= 0 and chr(key) in string.printable:
            ch = chr(key)

        if ch is not None:
            # For this example, replace the text. Normally we would append it.
            #self._tc.AppendText(ch)
            self._tc.SetValue(ch)
            self._tc.SetInsertionPointEnd()
        else:
            evt.Skip()

    def StartingClick(self):
        """
        If the editor is enabled by clicking on the cell, this method will be
        called to allow the editor to simulate the click on the control if
        needed.
        """
        self.log.write("MyCellEditor: StartingClick\n")

    def Destroy(self):
        """final cleanup"""
        self.log.write("MyCellEditor: Destroy\n")
        super(MyCellEditor, self).Destroy()

    def Clone(self):
        """
        Create a new object which is the copy of this one
        *Must Override*
        """
        self.log.write("MyCellEditor: Clone\n")
        return MyCellEditor(self.log)
#---------------------------------------------------------------------------
class GridEditorTest(gridlib.Grid):
    """A 10x3 grid demonstrating two ways to attach the custom MyCellEditor:
    on a single cell and on a whole column."""

    def __init__(self, parent, log):
        gridlib.Grid.__init__(self, parent, -1)
        self.log = log
        self.CreateGrid(10, 3)

        # Somebody changed the grid so the type registry takes precedence
        # over the default attribute set for editors and renderers, so we
        # have to set null handlers for the type registry before the
        # default editor will get used otherwise...
        #self.RegisterDataType(wxGRID_VALUE_STRING, None, None)
        #self.SetDefaultEditor(MyCellEditor(self.log))

        # Or we could just do it like this:
        #self.RegisterDataType(wx.GRID_VALUE_STRING,
        #                      wx.GridCellStringRenderer(),
        #                      MyCellEditor(self.log))
        #                      )

        # but for this example, we'll just set the custom editor on one cell
        self.SetCellEditor(1, 0, MyCellEditor(self.log))
        self.SetCellValue(1, 0, "Try to edit this box")

        # and on a column
        attr = gridlib.GridCellAttr()
        attr.SetEditor(MyCellEditor(self.log))
        self.SetColAttr(2, attr)
        self.SetCellValue(1, 2, "or any in this column")

        # Widen all three columns for readability.
        self.SetColSize(0, 150)
        self.SetColSize(1, 150)
        self.SetColSize(2, 150)
#---------------------------------------------------------------------------
class TestFrame(wx.Frame):
    """Top-level frame hosting the editor-test grid."""

    def __init__(self, parent, log):
        wx.Frame.__init__(self, parent, -1, "Custom Grid Cell Editor Test",
                          size=(640,480))
        grid = GridEditorTest(self, log)
#---------------------------------------------------------------------------
if __name__ == '__main__':
import sys
app = wx.PySimpleApp()
frame = TestFrame(None, sys.stdout)
frame.Show(True)
app.MainLoop()
|
[
"prem1pre@gmail.com"
] |
prem1pre@gmail.com
|
172a466d5f80f6441ed6b874517a024f13c5aa06
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_123/656.py
|
d804f5541e5fb0795836e9f5a04694acf4147beb
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,797
|
py
|
#
# By Allan Douglas R. de Oliveira
# This program is under public license
#
########################
# makes python 2.x behave like python 3k
from __future__ import print_function, unicode_literals, division
# common imports
import sys
import operator
import math
from io import StringIO
from itertools import chain, combinations, product, permutations, islice
from collections import namedtuple
from functools import reduce
if sys.version_info[0] >= 3:
#import numpy as np
pass
else:
# for pypy until it doesn't support py3k
from itertools import izip as zip, ifilter as filter, imap as map
range = xrange
# numpypy may not work well on windows, yet
#import numpypy as np
def isqrt(n):
    """Integer square root: floor(sqrt(n)) for a non-negative integer n.

    Newton's method on integers. Fixes vs. the original: n == 0 raised
    ZeroDivisionError (n // x with x == 0) and negative n silently
    returned garbage; now 0 and 1 return themselves and negatives raise.
    """
    if n < 0:
        raise ValueError('isqrt() argument must be non-negative')
    if n < 2:
        return n
    x = n
    y = (x + n // x) // 2
    while y < x:
        x = y
        y = (x + n // x) // 2
    return x
# Parallel map with switch to serial map
def pmap(f, iterable, parallel=True):
    """Map f over iterable, fanning out to a process pool when parallel=True.

    NOTE(review): the Pool(7) is never closed/joined and the worker count is
    hard-coded -- acceptable for a one-shot contest script, leaky elsewhere.
    """
    if parallel:
        from multiprocessing import Pool
        p = Pool(7)
        # imap preserves input order while computing lazily.
        return p.imap(f, iterable, chunksize=1)
    else:
        return map(f, iterable)
def dot_product(v1, v2, sum=sum, map=map, mul=operator.mul):
    """Dot product of two equal-length vectors.

    The builtins are pre-bound as defaults so lookups are locals inside
    the call (a micro-optimisation kept from the original signature).
    """
    pairwise_products = map(mul, v1, v2)
    return sum(pairwise_products)
# Linear-algebra helpers; a Line stores the coefficients of Ax + By = C.
Line = namedtuple('Line', 'A, B, C')  # Line is Ax + By = C
Point = namedtuple('Point', 'x, y')

def iszero(n):
    """True when n is zero up to floating-point tolerance."""
    return abs(n) < 1e-12

def point_to_line(p1, m):
    """Line through point p1 with slope m."""
    return Line(m, -1, m * p1.x - p1.y)

def points_to_line(p1, p2):
    """Line through the two points p1 and p2."""
    dy = p2.y - p1.y
    dx = p2.x - p1.x
    return Line(dy, -dx, dy * p1.x - dx * p1.y)

def line_intersection2D(line1, line2):
    """Intersection Point of two lines, or None when they are parallel."""
    det = line1.A * line2.B - line2.A * line1.B
    if iszero(det):  # parallel (or coincident) lines
        return None
    x = (line2.B * line1.C - line1.B * line2.C) / det
    y = (line1.A * line2.C - line2.A * line1.C) / det
    return Point(x, y)

def calc_coord_y(line, x):
    """y coordinate of the line at the given x (requires line.B != 0)."""
    return (line.C - line.A * x) / line.B
# end of standard stuff
########################
sample_input = StringIO('''4
2 2
2 1
2 4
2 1 1 6
10 4
25 20 9 100
1 4
1 1 1 1''')
def check(motes_ordered, A):
    """Simulate absorbing motes in ascending order starting at size A.

    Return None if every mote is absorbed, otherwise (index, size) of the
    first mote that could not be absorbed and the size reached so far.
    """
    size = A
    for idx, mote in enumerate(motes_ordered):
        if size <= mote:
            return (idx, size)
        size += mote
    return None
def try_delete(i, motes_ordered, A, changes):
    # Branch: spend one change removing the largest remaining mote, then
    # keep going from the sticking point i (motes before i were absorbed).
    #print ('try delete', A, changes, motes_ordered)
    motes_deleted = motes_ordered[i:-1]
    return process(motes_deleted, A, changes + 1)

def try_add(i, motes_ordered, A, changes):
    # Branch: spend one change inserting a new mote small enough to absorb
    # (size A - 1), growing us before we face the blocking mote again.
    #print ('try add', A, changes, motes_ordered)
    #minimum_new_mote_size = motes_ordered[i] - A + 1
    #assert minimum_new_mote_size > 0
    if A <= 1:
        # At size 1 nothing can ever be absorbed; adding cannot help.
        return None
    maximum_new_mote_size = A - 1
    results = []
    # NOTE(review): this range contains exactly one value
    # (maximum_new_mote_size) -- only the largest addable mote is tried.
    for new_mote_size in range(maximum_new_mote_size, maximum_new_mote_size+1):
        motes_added_new = [new_mote_size] + motes_ordered[i:]
        process_result = process(motes_added_new, A, changes + 1)
        if process_result is not None:
            results.append(process_result)
    return None if len(results) == 0 else min(results)

def process(motes_ordered, A, changes):
    # Recursive search: absorb as far as possible; at a blockage branch on
    # delete-vs-add and keep the cheaper outcome (minimum total changes).
    #print (A, changes, motes_ordered)
    if len(motes_ordered) == 0:
        #print ('empty list, returning')
        return changes
    result = check(motes_ordered, A)
    if result is None:
        # Everything absorbed without further modifications.
        return changes
    else:
        i, a = result
        result_delete = try_delete(i, motes_ordered, a, changes)
        result_add = try_add(i, motes_ordered, a, changes)
        assert result_delete is not None or result_add is not None
        if result_delete is None:
            return result_add
        elif result_add is None:
            return result_delete
        else:
            return min(result_add, result_delete)
def process_test_case(inputs):
    """Solve one (A, N, motes) case: minimum add/remove operations needed
    so the mote of size A can absorb everything."""
    A, N, motes = inputs
    # The greedy simulation in check() requires ascending order.
    motes_ordered = sorted(motes)
    changes = 0
    process_result = process(motes_ordered, A, changes)
    assert process_result is not None
    return process_result
def read_test_case(f):
    """Read one test case from stream f: an 'A N' header line followed by
    a line of N mote sizes. Returns (A, N, motes)."""
    A, N = map(int, f.readline().split())
    motes = list(map(int, f.readline().split()))
    return (A, N, motes)
def print_result(i, result):
    """Print one Code Jam answer line; case numbering is 1-based and a
    None result is reported as 'Error'."""
    case_no = i + 1
    if result is None:
        print('Case #%d: %s' % (case_no, 'Error'))
    else:
        print('Case #%d: %d' % (case_no, result))
##########################
# basic test case reading and processing skeleton
def read_test_cases(f):
    # First line of the input holds the number of test cases T.
    T = int(f.readline())
    return [read_test_case(f) for t in range(T)]

def main(stream, parallel):
    # pmap may compute out of process, but enumerate preserves input order.
    for i, result in enumerate(pmap(process_test_case, read_test_cases(stream), parallel=parallel)):
        print_result(i, result)

if __name__ == '__main__':
    # With a filename argument: solve that file using the process pool.
    # Without arguments: run the embedded sample input serially.
    if len(sys.argv) > 1:
        main(open(sys.argv[1]), True)
    else:
        main(sample_input, False)
##########################
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
39d1e033b8bb386f1fc6b9d116ec599b624a8828
|
dd4d1a61ec680a86d4b569490bf2a898ea0d7557
|
/appengine/predator/analysis/test/clusterfuzz_data_test.py
|
d1d1f2e8d5719c1e04bee66fb5c8919fa642a706
|
[
"BSD-3-Clause"
] |
permissive
|
mcgreevy/chromium-infra
|
f1a68914b47bcbe3cd8a424f43741dd74fedddf4
|
09064105713603f7bf75c772e8354800a1bfa256
|
refs/heads/master
| 2022-10-29T23:21:46.894543
| 2017-05-16T06:22:50
| 2017-05-16T06:22:50
| 91,423,078
| 1
| 1
|
BSD-3-Clause
| 2022-10-01T18:48:03
| 2017-05-16T06:23:34
|
Python
|
UTF-8
|
Python
| false
| false
| 5,279
|
py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from analysis.analysis_testcase import AnalysisTestCase
from analysis.clusterfuzz_data import ClusterfuzzData
from analysis.clusterfuzz_parser import ClusterfuzzParser
from analysis.stacktrace import CallStack
from analysis.stacktrace import StackFrame
from analysis.stacktrace import Stacktrace
from analysis.type_enums import CrashClient
from analysis.type_enums import SanitizerType
from libs.deps.chrome_dependency_fetcher import ChromeDependencyFetcher
from libs.deps.dependency import Dependency
from libs.deps.dependency import DependencyRoll
class CusterfuzzDataTest(AnalysisTestCase):
  """Tests ``ClusterfuzzData`` class.

  NOTE(review): the class name looks misspelled ("Custerfuzz" rather than
  "Clusterfuzz"); left unchanged here since renaming could affect test
  discovery/filter configuration -- confirm before fixing.
  """

  def testProperties(self):
    """Tests ``ClusterfuzzData`` specific properties."""
    raw_crash_data = self.GetDummyClusterfuzzData(sanitizer='ASAN')
    crash_data = ClusterfuzzData(raw_crash_data)
    self.assertEqual(crash_data.crashed_address,
                     raw_crash_data['customized_data']['crashed_address'])
    self.assertEqual(crash_data.crashed_type,
                     raw_crash_data['customized_data']['crashed_type'])
    # The raw 'ASAN' string is normalized to the SanitizerType enum.
    self.assertEqual(crash_data.sanitizer,
                     SanitizerType.ADDRESS_SANITIZER)
    self.assertEqual(crash_data.job_type,
                     raw_crash_data['customized_data']['job_type'])
    self.assertEqual(crash_data.regression_range,
                     raw_crash_data['customized_data']['regression_range'])
    self.assertEqual(crash_data.testcase,
                     raw_crash_data['customized_data']['testcase'])

  @mock.patch('analysis.clusterfuzz_parser.ClusterfuzzParser.Parse')
  def testParseStacktraceFailed(self, mock_parse):
    """Tests that ``stacktrace`` is None when failed to parse stacktrace."""
    mock_parse.return_value = None
    crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
    self.assertIsNone(crash_data.stacktrace)

  def testParseStacktraceSucceeded(self):
    """Tests parsing ``stacktrace``."""
    crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
    stack = CallStack(0)
    stacktrace = Stacktrace([stack], stack)
    with mock.patch(
        'analysis.clusterfuzz_parser.ClusterfuzzParser.Parse') as mock_parse:
      mock_parse.return_value = stacktrace
      self._VerifyTwoStacktracesEqual(crash_data.stacktrace, stacktrace)

  def testParseStacktraceReturnsCache(self):
    """Tests that ``stacktrace`` returns cached ``_stacktrace`` value."""
    crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
    stack = CallStack(1)
    stacktrace = Stacktrace([stack], stack)
    crash_data._stacktrace = stacktrace
    self._VerifyTwoStacktracesEqual(crash_data.stacktrace, stacktrace)

  def testDependencies(self):
    """Tests ``dependencies`` property."""
    dep = Dependency('src/', 'https://repo', 'rev1')
    crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData(
        dependencies=[{'dep_path': dep.path,
                       'repo_url': dep.repo_url,
                       'revision': dep.revision}]))
    self.assertEqual(len(crash_data.dependencies), 1)
    self.assertTrue(dep.path in crash_data.dependencies)
    self.assertEqual(crash_data.dependencies[dep.path].path, dep.path)
    self.assertEqual(crash_data.dependencies[dep.path].repo_url, dep.repo_url)
    self.assertEqual(crash_data.dependencies[dep.path].revision, dep.revision)

  def testDependenciesReturnsCache(self):
    """Tests that ``dependencies`` returns cached ``_dependencies`` value."""
    crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
    deps = {'src/': Dependency('src/', 'https://repo', 'rev')}
    crash_data._dependencies = deps
    self.assertEqual(crash_data.dependencies, deps)

  def testDependencyRollsReturnsCache(self):
    """Tests that ``dependency_rolls`` returns cached ``_dependency_rolls``."""
    crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData())
    dep_roll = {'src/': DependencyRoll('src/', 'https://repo', 'rev0', 'rev3')}
    crash_data._dependency_rolls = dep_roll
    self.assertEqual(crash_data.dependency_rolls, dep_roll)

  def testDependencyRolls(self):
    """Tests ``dependency_rolls`` property."""
    dep_roll = DependencyRoll('src/', 'https://repo', 'rev1', 'rev6')
    crash_data = ClusterfuzzData(self.GetDummyClusterfuzzData(
        dependency_rolls=[{'dep_path': dep_roll.path,
                           'repo_url': dep_roll.repo_url,
                           'old_revision': dep_roll.old_revision,
                           'new_revision': dep_roll.new_revision}]))
    self.assertEqual(len(crash_data.dependency_rolls), 1)
    self.assertTrue(dep_roll.path in crash_data.dependency_rolls)
    self.assertEqual(crash_data.dependency_rolls[dep_roll.path].path,
                     dep_roll.path)
    self.assertEqual(crash_data.dependency_rolls[dep_roll.path].repo_url,
                     dep_roll.repo_url)
    self.assertEqual(crash_data.dependency_rolls[dep_roll.path].old_revision,
                     dep_roll.old_revision)
    self.assertEqual(crash_data.dependency_rolls[dep_roll.path].new_revision,
                     dep_roll.new_revision)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
35d07eeece96e275825fbdd83830f5c389cb5f9c
|
6b8b6e5c7a31342b781909623d4fe60a563482d4
|
/sensor/DS18B20.py
|
ec091e5f56edd07f7219f5a7edd77bf711220b0d
|
[] |
no_license
|
ariebroere/sensor
|
7d3d4cd6ef5348bf5f3e3bdc6731c7b237200447
|
1ede75d51d67622352cb4a3e918255f5cae3c061
|
refs/heads/master
| 2021-01-18T22:50:02.361613
| 2017-04-02T06:29:40
| 2017-04-02T06:29:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Nick Lee
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Include the sensor directory, so this file may be run as a test script.
if __name__ == "__main__" and __package__ is None:
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import subprocess
import time

import sensor
from sensor.util import Temperature
class DS18B20(sensor.SensorBase):
    """DS18B20 1-wire temperature sensor read through the sysfs interface.

    addr is the sensor's 1-wire address (the '28-...' directory name under
    /sys/bus/w1/devices, see the usage notes at the bottom of this file).
    """

    def __init__(self, addr):
        super(DS18B20, self).__init__(self._update_sensor_data)
        # Sysfs file whose two lines hold the CRC status and the raw reading.
        self._device = '/sys/bus/w1/devices/%s/w1_slave' % addr
        # Last reading in degrees Celsius; None until a successful read.
        self._temperature = None

    def temperature(self):
        """Return the latest reading as a Temperature, or None on failure."""
        self._update()
        return Temperature(C=self._temperature) if self._temperature is not None else None

    @sensor.w1_lock
    def _update_sensor_data(self):
        # Try at most 3 times; 1-wire reads can fail transiently.
        for i in range(0,3):
            # Split output into separate lines.
            lines = subprocess.check_output(['cat', self._device]).split('\n')
            # If the first line does not end with 'YES' (the kernel's
            # CRC-ok marker), try again.
            if lines[0][-3:] != 'YES':
                time.sleep(0.2)
                continue
            # If the second line does not have a 't=', try again.
            pos = lines[1].find('t=')
            if pos < 0:
                time.sleep(0.2)
                continue
            # Extract the temperature (reported in millidegrees Celsius).
            self._temperature = float(lines[1][pos+2:]) / 1000.0
            return
        # Failed reading
        self._temperature = None
""" Run this file as a test script
1. Find the sensor's 1-wire address
$ cd /sys/bus/w1/devices
$ ls
Look for '28-..........'. That is the address.
Then:
$ python DS18B20.py <address>
"""
if __name__ == '__main__':
    # Manual smoke test (Python 2): read the sensor at the 1-wire address
    # given on the command line, once per second for 30 seconds.
    import sys, time
    ds = DS18B20(sys.argv[1])
    for i in range(0, 30):
        print ds.temperature()
        time.sleep(1)
|
[
"lee1nick@yahoo.ca"
] |
lee1nick@yahoo.ca
|
4fddac42ee4bc07489ef1b4e274f85afb21e0de4
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/azuredata/v20170301preview/get_sql_server_registration.py
|
212ce2e8e70f2e01cb921271035d0102a9a6da0b
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,406
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSqlServerRegistrationResult',
'AwaitableGetSqlServerRegistrationResult',
'get_sql_server_registration',
]
@pulumi.output_type
class GetSqlServerRegistrationResult:
    """
    A SQL server registration.

    NOTE: generated by the Pulumi SDK Generator; each constructor argument
    is validated and stashed via pulumi.set so the @pulumi.getter
    properties below can retrieve it by name.
    """
    def __init__(__self__, id=None, location=None, name=None, property_bag=None, resource_group=None, subscription_id=None, tags=None, type=None):
        # Validation only rejects non-str truthy values; None passes through.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if property_bag and not isinstance(property_bag, str):
            raise TypeError("Expected argument 'property_bag' to be a str")
        pulumi.set(__self__, "property_bag", property_bag)
        if resource_group and not isinstance(resource_group, str):
            raise TypeError("Expected argument 'resource_group' to be a str")
        pulumi.set(__self__, "resource_group", resource_group)
        if subscription_id and not isinstance(subscription_id, str):
            raise TypeError("Expected argument 'subscription_id' to be a str")
        pulumi.set(__self__, "subscription_id", subscription_id)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="propertyBag")
    def property_bag(self) -> Optional[str]:
        """
        Optional Properties as JSON string
        """
        return pulumi.get(self, "property_bag")
    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> Optional[str]:
        """
        Resource Group Name
        """
        return pulumi.get(self, "resource_group")
    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[str]:
        """
        Subscription Id
        """
        return pulumi.get(self, "subscription_id")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetSqlServerRegistrationResult(GetSqlServerRegistrationResult):
    # Awaitable shim: the unreachable `if False: yield` marks __await__ as a
    # generator so the result can be used with `await`; awaiting yields a
    # plain GetSqlServerRegistrationResult copy immediately.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSqlServerRegistrationResult(
            id=self.id,
            location=self.location,
            name=self.name,
            property_bag=self.property_bag,
            resource_group=self.resource_group,
            subscription_id=self.subscription_id,
            tags=self.tags,
            type=self.type)
def get_sql_server_registration(resource_group_name: Optional[str] = None,
                                sql_server_registration_name: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlServerRegistrationResult:
    """
    A SQL server registration.

    :param str resource_group_name: Name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str sql_server_registration_name: Name of the SQL Server registration.
    :param opts: Options controlling the underlying Pulumi invoke.
    :return: An awaitable wrapper around the registration's properties.
    """
    # Marshal arguments into the provider's expected camelCase keys.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['sqlServerRegistrationName'] = sql_server_registration_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the azure-native provider for this API version.
    __ret__ = pulumi.runtime.invoke('azure-native:azuredata/v20170301preview:getSqlServerRegistration', __args__, opts=opts, typ=GetSqlServerRegistrationResult).value
    return AwaitableGetSqlServerRegistrationResult(
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        property_bag=__ret__.property_bag,
        resource_group=__ret__.resource_group,
        subscription_id=__ret__.subscription_id,
        tags=__ret__.tags,
        type=__ret__.type)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
610b9ee40927adccd43e1fe32dcc8a11699359b4
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/442497_Pattern_List/recipe-442497.py
|
7a7e212a314061dc34d2a57bd9d425d6b849bcf9
|
[
"Python-2.0",
"MIT"
] |
permissive
|
betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844
| 2021-02-24T15:39:59
| 2021-02-24T15:39:59
| 341,878,663
| 0
| 0
|
MIT
| 2021-02-24T15:40:00
| 2021-02-24T11:31:15
|
Python
|
UTF-8
|
Python
| false
| false
| 800
|
py
|
import re
class PatternList(object):
    """A PatternList is a list of compiled regular expressions. The 'in'
    operator reports whether a string matches any of them (using search,
    NOT match)."""

    def __init__(self, patterns=None):
        """patterns: optional iterable of regex source strings to compile.

        NOTE: the original used a mutable default argument (patterns=[]);
        None is used as the sentinel instead to avoid cross-instance state.
        """
        self.patterns = []
        for p in (patterns if patterns is not None else []):
            self.add(p)

    def add(self, pattern):
        """Compile *pattern* and append it to the list."""
        self.patterns.append(re.compile(pattern))

    def __contains__(self, item):
        # True if any stored pattern is found anywhere in *item*.
        return any(p.search(item) for p in self.patterns)
if __name__=="__main__":
    # Smoke test: filenames ending in .txt/.doc match; others do not.
    examplelist = PatternList( [ ".*txt$" , ".*doc$" ])
    assert( "test.txt" in examplelist )
    assert( "test.xls" not in examplelist )
|
[
"betty@qburst.com"
] |
betty@qburst.com
|
96038f0fdec7f1c6c7c9e3d1da8063fe493d6e40
|
1674e40a5dab691961ae676b3d6752870df1c60b
|
/.cache/JetBrains/PyCharm2020.2/python_stubs/-988789078/_multibytecodec.py
|
fdb881ed202160e3f7d75a9bd8465130cfb97a9a
|
[] |
no_license
|
AiperiAkhumbai/group_project
|
a7c0efacbdcfc4a35d62b6321b255e3ed9e3436c
|
9c62b9964776306ab85901b501536eb667d3c337
|
refs/heads/main
| 2023-01-23T08:12:47.433544
| 2020-11-24T18:57:49
| 2020-11-24T18:57:49
| 313,209,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,428
|
py
|
# encoding: utf-8
# module _multibytecodec
# from /usr/lib/python3.8/lib-dynload/_multibytecodec.cpython-38-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# no imports
# functions
def __create_codec(*args, **kwargs): # real signature unknown
    """IDE stub for the C-level codec factory; the body is a placeholder."""
    pass
# classes
class MultibyteIncrementalDecoder(object):
    # IDE-generated stub mirroring the C implementation in _multibytecodec;
    # all bodies are placeholders and carry no runtime behavior.
    def decode(self, *args, **kwargs): # real signature unknown
        pass
    def getstate(self, *args, **kwargs): # real signature unknown
        pass
    def reset(self, *args, **kwargs): # real signature unknown
        pass
    def setstate(self, *args, **kwargs): # real signature unknown
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """how to treat errors"""
class MultibyteIncrementalEncoder(object):
    # IDE-generated stub mirroring the C implementation in _multibytecodec;
    # all bodies are placeholders and carry no runtime behavior.
    def encode(self, *args, **kwargs): # real signature unknown
        pass
    def getstate(self, *args, **kwargs): # real signature unknown
        pass
    def reset(self, *args, **kwargs): # real signature unknown
        pass
    def setstate(self, *args, **kwargs): # real signature unknown
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """how to treat errors"""
"""how to treat errors"""
class MultibyteStreamReader(object):
    # IDE-generated stub mirroring the C implementation in _multibytecodec;
    # all bodies are placeholders and carry no runtime behavior.
    def read(self, *args, **kwargs): # real signature unknown
        pass
    def readline(self, *args, **kwargs): # real signature unknown
        pass
    def readlines(self, *args, **kwargs): # real signature unknown
        pass
    def reset(self, *args, **kwargs): # real signature unknown
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """how to treat errors"""
    stream = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class MultibyteStreamWriter(object):
    # IDE-generated stub mirroring the C implementation in _multibytecodec;
    # all bodies are placeholders and carry no runtime behavior.
    def reset(self, *args, **kwargs): # real signature unknown
        pass
    def write(self, *args, **kwargs): # real signature unknown
        pass
    def writelines(self, *args, **kwargs): # real signature unknown
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """how to treat errors"""
    stream = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7f4bd3248460>'
__spec__ = None # (!) real value is "ModuleSpec(name='_multibytecodec', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7f4bd3248460>, origin='/usr/lib/python3.8/lib-dynload/_multibytecodec.cpython-38-x86_64-linux-gnu.so')"
|
[
"aiperiahumbaeva@gmail.com"
] |
aiperiahumbaeva@gmail.com
|
cec0a70d5e0bcd8539d350cd0653bff447945982
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_natural.py
|
32e26c958383409ba88754ec0a96fc02003e41a2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
#calss header
class _NATURAL():
def __init__(self,):
self.name = "NATURAL"
self.definitions = [u'someone who was born with the right characteristics or abilities for doing a particular thing: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
88de148d9408767a1d93796ed6d7be6a97acbda6
|
36957a9ce540846d08f151b6a2c2d582cff1df47
|
/VR/Python/Python36/Lib/site-packages/django/views/generic/__init__.py
|
00119bf785b030b880c58b3e822cbc718741626b
|
[] |
no_license
|
aqp1234/gitVR
|
60fc952307ef413e396d31e0d136faffe087ed2b
|
e70bd82c451943c2966b8ad1bee620a0ee1080d2
|
refs/heads/master
| 2022-12-29T15:30:12.540947
| 2020-10-07T15:26:32
| 2020-10-07T15:26:32
| 290,163,043
| 0
| 1
| null | 2020-08-25T09:15:40
| 2020-08-25T08:47:36
|
C#
|
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:5939f31179cac89ab31e52eefd5b17227620f86a240cd042518355fd4e94fa89
size 822
|
[
"aqp1234@naver.com"
] |
aqp1234@naver.com
|
e9b75525afbde18fac325b06becac1b37aafe034
|
9c85d132b2ed8c51f021f42ed9f20652827bca45
|
/source/res/scripts/client/gui/Scaleform/framework/tooltip_mgr.py
|
541c0f410e6c97aa6b1628f62d9bec9ec2b250a1
|
[] |
no_license
|
Mododejl/WorldOfTanks-Decompiled
|
0f4063150c7148184644768b55a9104647f7e098
|
cab1b318a58db1e428811c41efc3af694906ba8f
|
refs/heads/master
| 2020-03-26T18:08:59.843847
| 2018-06-12T05:40:05
| 2018-06-12T05:40:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,025
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/framework/tooltip_mgr.py
import logging
import Keys
from gui.Scaleform.framework.entities.abstract.ToolTipMgrMeta import ToolTipMgrMeta
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.shared import events
from gui.shared.tooltips import builders
from gui.app_loader import g_appLoader
from gui import InputHandler
_logger = logging.getLogger(__name__)
class ToolTip(ToolTipMgrMeta):
    """Scaleform tooltip manager (decompiled from Python 2.7 bytecode).

    Routes typed/complex tooltip build requests to the matching builder,
    caches the last request so the tooltip can be rebuilt in "advanced"
    form while ALT is held, and tracks dynamically-updating tooltips.
    """

    def __init__(self, settings, advComplexSettings, *noTooltipSpaceIDs):
        # noTooltipSpaceIDs: GUI space IDs where typed tooltips are suppressed.
        super(ToolTip, self).__init__()
        self._areTooltipsDisabled = False
        self._isAllowedTypedTooltip = True
        self._noTooltipSpaceIDs = noTooltipSpaceIDs
        self._complex = builders.ComplexBuilder(TOOLTIPS_CONSTANTS.DEFAULT, TOOLTIPS_CONSTANTS.COMPLEX_UI, advComplexSettings)
        self._builders = builders.LazyBuildersCollection(settings)
        self._builders.addBuilder(builders.SimpleBuilder(TOOLTIPS_CONSTANTS.DEFAULT, TOOLTIPS_CONSTANTS.COMPLEX_UI))
        self._dynamic = {}
        # Cached state of the last tooltip request (used by handleKeyEvent).
        self.__fastRedraw = False
        self.__isAdvancedKeyPressed = False
        self.__isComplex = False
        self.__tooltipID = None
        self.__args = None
        self.__stateType = None
        return

    def show(self, data, linkage):
        # Push built tooltip data to the Flash side.
        self.as_showS(data, linkage, self.__fastRedraw)

    def handleKeyEvent(self, event):
        # Rebuild the currently shown tooltip in advanced form when ALT
        # is pressed (or back to normal when released), if supported.
        tooltipType = self.__tooltipID
        altPressed = event.key == Keys.KEY_LALT or event.key == Keys.KEY_RALT
        self.__isAdvancedKeyPressed = event.isKeyDown() and altPressed
        if tooltipType is None or not altPressed:
            return
        else:
            args = self.__args
            isSupportAdvanced = self.isSupportAdvanced(tooltipType, *args)
            if isSupportAdvanced:
                self.__fastRedraw = True
                if self.__isComplex:
                    self.onCreateComplexTooltip(tooltipType, self.__stateType)
                else:
                    self.onCreateTypedTooltip(tooltipType, args, self.__stateType)
            return

    def onCreateTypedTooltip(self, tooltipType, args, stateType):
        # Build a typed tooltip via its registered builder; dynamic results
        # are made visible and remembered for later updates.
        if self._areTooltipsDisabled:
            return
        elif not self._isAllowedTypedTooltip:
            return
        else:
            builder = self._builders.getBuilder(tooltipType)
            if builder is not None:
                data = builder.build(self, stateType, self.__isAdvancedKeyPressed, *args)
            else:
                _logger.warning('Tooltip can not be displayed: type "%s" is not found', tooltipType)
                return
            self.__cacheTooltipData(False, tooltipType, args, stateType)
            if data is not None and data.isDynamic():
                data.changeVisibility(True)
                if tooltipType not in self._dynamic:
                    self._dynamic[tooltipType] = data
            return

    def onCreateComplexTooltip(self, tooltipID, stateType):
        # Complex (rich-text) tooltips always go through the complex builder.
        if self._areTooltipsDisabled:
            return
        self._complex.build(self, stateType, self.__isAdvancedKeyPressed, tooltipID)
        self.__cacheTooltipData(True, tooltipID, tuple(), stateType)

    def onHideTooltip(self, tooltipId):
        # Hide a dynamic tooltip (if tracked) and clear the cached request.
        if not self._areTooltipsDisabled and tooltipId in self._dynamic:
            self._dynamic[tooltipId].changeVisibility(False)
        self.__tooltipID = None
        self.__fastRedraw = False
        return

    def _populate(self):
        # Subscribe to GUI-space and key events on view creation.
        super(ToolTip, self)._populate()
        g_appLoader.onGUISpaceEntered += self.__onGUISpaceEntered
        self.addListener(events.AppLifeCycleEvent.CREATING, self.__onAppCreating)
        InputHandler.g_instance.onKeyDown += self.handleKeyEvent
        InputHandler.g_instance.onKeyUp += self.handleKeyEvent

    def _dispose(self):
        # Unsubscribe and stop all dynamic tooltip updates on teardown.
        self._builders.clear()
        g_appLoader.onGUISpaceEntered -= self.__onGUISpaceEntered
        self.removeListener(events.AppLifeCycleEvent.CREATING, self.__onAppCreating)
        while self._dynamic:
            _, data = self._dynamic.popitem()
            data.stopUpdates()
        InputHandler.g_instance.onKeyDown -= self.handleKeyEvent
        InputHandler.g_instance.onKeyUp -= self.handleKeyEvent
        super(ToolTip, self)._dispose()

    def __onGUISpaceEntered(self, spaceID):
        self._isAllowedTypedTooltip = spaceID not in self._noTooltipSpaceIDs

    def __onAppCreating(self, appNS):
        # Another app namespace is being created: disable tooltips here.
        if self.app.appNS != appNS:
            self._areTooltipsDisabled = True

    def isSupportAdvanced(self, tooltipType, *args):
        builder = self._complex if self.__isComplex else self._builders.getBuilder(tooltipType)
        return False if builder is None else builder.supportAdvanced(tooltipType, *args)

    def __cacheTooltipData(self, isComplex, tooltipID, args, stateType):
        # Remember the last request so ALT can trigger a rebuild.
        self.__isComplex = isComplex
        self.__tooltipID = tooltipID
        self.__args = args
        self.__stateType = stateType
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
91bde4973cd20d507e01377170f7db767aba6dc2
|
ca59d18e503ef22fbc920c6de48ffc8eac5a1443
|
/tools/Polygraphy/tests/util/test_misc.py
|
fc3978c829d4d1d3d5bd3621ac14202fb4aa9d29
|
[
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] |
permissive
|
boh-inspur/TensorRT
|
9fc0ae0ad4e31da040d10728b63d9dc284852b67
|
e4d2f7f4406f1c8f4632cc67de33728cef90ca29
|
refs/heads/master
| 2023-04-13T21:24:13.912673
| 2021-04-23T09:55:18
| 2021-04-23T09:55:18
| 265,431,588
| 0
| 0
|
Apache-2.0
| 2021-04-23T09:55:19
| 2020-05-20T02:49:58
| null |
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.util import misc
import numpy as np
import pytest
# (iterable of dims, expected element volume)
VOLUME_CASES = [
    ((1, 1, 1), 1),
    ((2, 3, 4), 24),
    (tuple(), 1),
]

@pytest.mark.parametrize("case", VOLUME_CASES)
def test_volume(case):
    it, vol = case
    assert misc.volume(it) == vol

class FindInDictCase(object):
    # Simple record for find_in_dict parametrization.
    def __init__(self, name, map, index, expected):
        self.name = name
        self.map = map
        self.index = index
        self.expected = expected

# Second case checks case-insensitive fallback matching.
FIND_IN_DICT_CASES = [
    FindInDictCase("resnet50_v1.5/output/Softmax:0", map={"resnet50_v1.5/output/Softmax:0": "x"}, index=None, expected="resnet50_v1.5/output/Softmax:0"),
    FindInDictCase("resnet50_v1.5/output/Softmax:0", map={"resnet50_v1.5/output/softmax:0": "x"}, index=None, expected="resnet50_v1.5/output/softmax:0"),
]

@pytest.mark.parametrize("case", FIND_IN_DICT_CASES)
def test_find_in_dict(case):
    actual = misc.find_in_dict(case.name, case.map, case.index)
    assert actual == case.expected

# (override shape, original shape, expected validity)
SHAPE_OVERRIDE_CASES = [
    ((1, 3, 224, 224), (None, 3, 224, 224), True),
]

@pytest.mark.parametrize("case", SHAPE_OVERRIDE_CASES)
def test_is_valid_shape_override(case):
    override, shape, expected = case
    assert misc.is_valid_shape_override(new_shape=override, original_shape=shape) == expected

# (input array, target shape, expected resulting shape)
SHAPE_MATCHING_CASES = [
    (np.zeros((1, 1, 3, 3)), (3, 3), (3, 3)), # Squeeze array shape
    (np.zeros((1, 3, 3, 1)), (1, 1, 3, 3), (1, 1, 3, 3)), # Permute
    (np.zeros((3, 3)), (1, 1, 3, 3), (3, 3)), # Squeeze specified shape
    (np.zeros((3, 3)), (-1, 3), (3, 3)), # Infer dynamic
    (np.zeros((3 * 224 * 224)), (None, 3, 224, 224), (1, 3, 224, 224)), # Reshape and Permute
    (np.zeros((1, 3, 224, 224)), (None, 224, 224, 3), (1, 224, 224, 3)), # Permute
]

@pytest.mark.parametrize("case", SHAPE_MATCHING_CASES)
def test_shape_matching(case):
    out, shape, expected_shape = case
    out = misc.try_match_shape(out, shape)
    assert out.shape == expected_shape

# (args, requested count, expected padded/truncated tuple)
UNPACK_ARGS_CASES = [
    ((0, 1, 2), 3, (0, 1, 2)), # no extras
    ((0, 1, 2), 4, (0, 1, 2, None)), # 1 extra
    ((0, 1, 2), 2, (0, 1)), # 1 fewer
]

@pytest.mark.parametrize("case", UNPACK_ARGS_CASES)
def test_unpack_args(case):
    args, num, expected = case
    assert misc.unpack_args(args, num) == expected

# (input list, expected order-preserving deduplication)
UNIQUE_LIST_CASES = [
    ([], []),
    ([3, 1, 2], [3, 1, 2]),
    ([1, 2, 3, 2, 1], [1, 2, 3]),
    ([0, 0, 0, 0, 1, 0, 0], [0, 1]),
    ([5, 5, 5, 5, 5], [5]),
]

@pytest.mark.parametrize("case", UNIQUE_LIST_CASES)
def test_unique_list(case):
    lst, expected = case
    assert misc.unique_list(lst) == expected
|
[
"rajeevsrao@users.noreply.github.com"
] |
rajeevsrao@users.noreply.github.com
|
280bb1419d313f14f695994d51b7b7c91de537e3
|
834d7ea5179414f17d37f3bb58164b8f6ac11b24
|
/python/ThirteenTeV/DisappTrksAMSB/createPoints.py
|
579c1c5a331e3b70880d28547c2fb1073e64627e
|
[] |
no_license
|
diracyoon/genproductions
|
aa9ee41ac1dde9e14ed039496c3259328ece7073
|
a7740f4d28c7bfff4e71827dc807d57d974e29b7
|
refs/heads/master
| 2021-01-11T11:22:44.685243
| 2018-11-23T14:05:36
| 2018-11-23T14:05:36
| 72,719,084
| 1
| 0
| null | 2016-11-03T07:21:29
| 2016-11-03T07:21:29
| null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
#!/usr/bin/env python
import os
def insertSLHA(outputName, massValue):
    # Splice the SLHA block for the given chargino mass (GeV) into the
    # config fragment, immediately after its first five lines; the rest of
    # the original file is preserved after the inserted block.
    with open(outputName, 'r+') as f:
        for x in range(5):
            f.readline()
        pos = f.tell()
        f_remainder = f.read()
        f.seek(pos)
        with open('slha/AMSB_chargino_%dGeV_Isajet780.slha' % massValue, 'r') as slhaFile:
            f.write(slhaFile.read())
        f.write(f_remainder)
def findMassValue(fileName, particleName):
    """Return the second whitespace-separated field of the first line in
    *fileName* containing *particleName* (as a string, exactly as written),
    or None if no line matches.
    """
    # Use a context manager so the handle is closed even on error
    # (the original leaked the open file).
    with open(fileName, 'r') as inputFile:
        for line in inputFile:
            if particleName in line:
                return line.split()[1]
    # Explicit None for "not found" (previously implicit).
    return None
# Templates: XXX = mass (GeV), YYY = ctau in mm, ZZZ = cross section (pb).
baseConfigFile = 'AMSB_chargino_M-XXXGeV_CTau-YYYcm_TuneCP5_13TeV_pythia8_cff.py'
baseParticleFile = 'geant4_AMSB_chargino.slha'
# Speed of light in cm/s, used to convert ctau (cm) to lifetime (ns).
c = 299792458.0 * 100.0 # cm/s
# xsecs[mass in GeV] = xsec (pb)
xsecs = {
    100 : 34.282,
    200 : 2.709959,
    300 : 0.577095,
    400 : 0.179644,
    500 : 0.06848,
    600 : 0.029636,
    700 : 0.013949,
    800 : 0.00697
|
[
"brian.patrick.francis@cern.ch"
] |
brian.patrick.francis@cern.ch
|
673f619abea67c616bf67a61024d4c5ad5f2befe
|
698176804e16c7ae59f66ccebdff746f74998662
|
/python/piketty/generatetmslice.py
|
62a1d462742ae093a247c9e9e1d7dd2c61d159c3
|
[
"MIT"
] |
permissive
|
tedunderwood/GenreProject
|
a7e30883123523b967214af28f4a137c60f3564b
|
7577f39f0cc89b9e85d0fbe67ae3e7797033588f
|
refs/heads/master
| 2021-01-23T19:12:59.786921
| 2016-03-17T16:11:36
| 2016-03-17T16:11:36
| 10,231,792
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
with open('/Users/tunder/Dropbox/GenreProject/metadata/topicmodelingsample.tsv', encoding = 'utf-8') as f:
filelines = f.readlines()
with open('/Users/tunder/Dropbox/GenreProject/metadata/tmslice.txt', mode = 'w', encoding = 'utf-8') as f:
for line in filelines[1:]:
label = line.split('\t')[0]
f.write(label + '\n')
|
[
"tunder@illinois.edu"
] |
tunder@illinois.edu
|
df60d9fd72aee467d847e5b7e7a8ec3c8ae8a680
|
7f20b1bddf9f48108a43a9922433b141fac66a6d
|
/csplugins/trunk/ucsd/rsaito/rs_Progs/rs_Python/rs_Python_Pack/tags/rs_Python_Pack090515/IVV_Packages/YO_IP/example2.py
|
4744447119df04b6f11f0fa2c5a5507d576157f6
|
[] |
no_license
|
ahdahddl/cytoscape
|
bf783d44cddda313a5b3563ea746b07f38173022
|
a3df8f63dba4ec49942027c91ecac6efa920c195
|
refs/heads/master
| 2020-06-26T16:48:19.791722
| 2013-08-28T04:08:31
| 2013-08-28T04:08:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
#!/usr/bin/env python
from lpsolve55 import *
lp = lpsolve('make_lp', 0, 4)
#lpsolve('set_verbose', lp, IMPORTANT)
lpsolve('set_obj_fn', lp, [1, 3, 6.24, 0.1])
lpsolve('add_constraint', lp, [0, 78.26, 0, 2.9], GE, 92.3)
lpsolve('add_constraint', lp, [0.24, 0, 11.31, 0], LE, 14.8)
lpsolve('add_constraint', lp, [12.68, 0, 0.08, 0.9], GE, 4)
lpsolve('set_lowbo', lp, [28.6, 0, 0, 18])
lpsolve('set_upbo', lp, [Infinite, Infinite, Infinite, 48.98])
lpsolve('set_col_name', lp, ['COLONE', 'COLTWO', 'COLTHREE', 'COLFOUR'])
lpsolve('set_row_name', lp, ['THISROW', 'THATROW', 'LASTROW'])
lpsolve('write_lp', lp, 'a.lp')
print lpsolve('get_mat', lp)[0]
lpsolve('solve', lp)
print lpsolve('get_objective', lp)
print lpsolve('get_variables', lp)[0]
print lpsolve('get_constraints', lp)[0]
lpsolve('delete_lp', lp)
|
[
"rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5"
] |
rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5
|
31d0b3108fb5597fc1566914fe1a6fd5bae45ff6
|
9db1103b05dc5053a984c2f46491e71216cbe13d
|
/everest/cascade/hierarchy.py
|
52bb27c03bc4a861f478cc873914137eda4e144c
|
[
"MIT"
] |
permissive
|
rsbyrne/everest
|
f396d11743fc0633992bb49bf40d6d5851c3fffa
|
1ec06301cdeb7c2b7d85daf6075d996c5529247e
|
refs/heads/master
| 2023-07-27T12:55:06.426748
| 2021-06-18T00:31:21
| 2021-06-18T00:31:21
| 222,559,267
| 2
| 1
|
MIT
| 2021-06-18T00:31:22
| 2019-11-18T22:43:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,292
|
py
|
###############################################################################
''''''
###############################################################################
from collections.abc import Mapping as _Mapping
from functools import lru_cache as _lru_cache
from . import _reseed
def flatten_hierarchy(hierarchy):
return dict(_flatten_hierarchy(hierarchy))
def _flatten_hierarchy(hierarchy):
for k, v in hierarchy.items():
if isinstance(v, Hierarchy):
for sk, sv in _flatten_hierarchy(v):
yield sk, sv
else:
yield k, v.value
class Item:
__slots__ = ('key', '_value')
def __init__(self, key, val, /):
self.key = key
self._value = val
@property
def value(self):
return self._value
@value.setter
def value(self, newval):
self._value = newval
def __str__(self):
return repr(self.value)
def __repr__(self):
return f'{type(self).__name__}({self.key}: {str(self)})'
class Hierarchy(_Mapping):
__slots__ = ('content', 'parent', 'subs', 'randhash')
# def __init__(self, *args, parent=None, **kwargs):
# super().__init__(*args, **kwargs)
def __init__(self, *args, parent = None, **kwargs):
self.content = dict(*args, **kwargs)
self.parent = parent
self.subs = dict()
self.randhash = _reseed.rdigits()
def flatten(self) -> dict:
return flatten_hierarchy(self)
# def remove_ghosts(self):
# for key, val in list(self.items()):
# if key.startswith('_'):
# del self[key]
# elif isinstance(val, type(self)):
# val.remove_ghosts()
def sub(self, key) -> 'Hierarchy':
self.subs[key] = subhier = type(self)(parent=self)
self.content.__setitem__(key, subhier)
return subhier
def __iter__(self):
return iter(self.content)
def __len__(self):
return len(self.content)
def __getitem__(self, arg, /):
out = self.raw_getitem(arg)
if isinstance(out, Item):
return out.value
return out
@_lru_cache
def raw_getitem(self, arg) -> Item:
if isinstance(arg, tuple):
out = self
for subarg in arg:
out = out.raw_getitem(subarg)
return out
try:
return super().__getitem__(arg)
except KeyError as exc:
for sub in self.subs.values():
try:
return sub.raw_getitem(arg)
except KeyError:
pass
raise KeyError from exc
def __setitem__(self, key, val):
try:
targ = self.raw_getitem(key)
if isinstance(targ, Item):
targ.value = val
else:
raise ValueError("Cannot manually set hierarchy.")
except KeyError:
if isinstance(val, Hierarchy):
sub = self.sub(key)
sub.update(val)
else:
if isinstance(val, Item):
val = val.value
self.content.__setitem__(key, Item(key, val))
def __delitem__(self, key):
self.content.__delitem__(key)
def update(self, source):
for key in source:
self[key] = source[key]
def __hash__(self):
return self.randhash
def __repr__(self):
return type(self).__name__ + super().__repr__()
def _repr_pretty_(self, p, cycle):
typnm = type(self).__name__
if cycle:
p.text(typnm + '{...}')
else:
with p.group(4, typnm + '({', '})'):
for idx, (key, val) in enumerate(self.items()):
if isinstance(val, Item):
val = val.value
if idx:
p.text(',')
p.breakable()
p.pretty(key)
p.text(': ')
p.pretty(val)
p.breakable()
def copy(self):
return type(self)(**self)
###############################################################################
###############################################################################
|
[
"rohan.byrne@gmail.com"
] |
rohan.byrne@gmail.com
|
86d43341acd4d75d7e9f6444a05667b88c3356c0
|
42b9bafc3c757543328d93fb60269ad4255aae17
|
/env/lib/python3.7/site-packages/jet/tests/settings.py
|
0c8cb379fe107502b4dc3633895113b84b4a7f85
|
[
"MIT"
] |
permissive
|
mejeng/kasir
|
4fe66d1828e72b64d770426d71185cdd3c54127e
|
cc6f9158b61c0cb45078ddf798af9588c8771311
|
refs/heads/master
| 2020-09-25T03:36:10.144439
| 2019-11-30T07:59:23
| 2019-11-30T07:59:23
| 225,908,795
| 2
| 0
|
MIT
| 2019-12-04T16:21:15
| 2019-12-04T16:21:15
| null |
UTF-8
|
Python
| false
| false
| 1,945
|
py
|
import os
import django
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = '!DJANGO_JET_TESTS!'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True
ROOT_URLCONF = 'jet.tests.urls'
INSTALLED_APPS = (
'jet.dashboard',
'jet',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'jet.tests',
)
MIDDLEWARE = MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
if django.VERSION[:2] < (1, 9):
TEMPLATE_CONTEXT_PROCESSORS = tuple(global_settings.TEMPLATE_CONTEXT_PROCESSORS) + (
'django.core.context_processors.request',
)
else:
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
)
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-US'
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_URL = '/static/'
JET_INDEX_DASHBOARD = 'jet.tests.dashboard.TestIndexDashboard'
JET_APP_INDEX_DASHBOARD = 'jet.tests.dashboard.TestAppIndexDashboard'
|
[
"slashsdull@gmail.com"
] |
slashsdull@gmail.com
|
731d0f6696c9cae00ba15f73151571a42ef6dae3
|
877bd731bc97f220c363914d1e66970e2d9e599e
|
/python_stack/_django/full_stack/tv_shows/tv_shows_app/migrations/0003_auto_20200604_0321.py
|
4dac5bd390a36b9dc89b42752fc9a70c942eba9d
|
[] |
no_license
|
mpresto/dojo
|
eaccc08465298d35ae5a8e0d60e547a90bc24e05
|
aec14ee041950eea7c35003fa03b0728b4606754
|
refs/heads/master
| 2021-05-26T00:15:16.551562
| 2020-10-04T00:09:48
| 2020-10-04T00:09:48
| 253,975,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
# Generated by Django 2.2 on 2020-06-04 03:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine Show.title as CharField(max_length=255)."""
    dependencies = [
        ('tv_shows_app', '0002_auto_20200603_0658'),
    ]
    operations = [
        migrations.AlterField(
            model_name='show',
            name='title',
            field=models.CharField(max_length=255),
        ),
    ]
|
[
"monty.preston5@gmail.com"
] |
monty.preston5@gmail.com
|
1bd128680cb0beaed2787f330cbaf980eefe2ce1
|
01552dc88e7c170de857f5ff0b52178326d5f003
|
/guild/query_cmd.py
|
00e09e3036d434e24efa3924aaef22b73e522633
|
[
"Apache-2.0"
] |
permissive
|
guildai/_guild-python-legacy
|
b8516f38b3dd4f27859850ec07fe9c4747f4fd8b
|
e552eff820d8edcfeb10b26bd5c8651548507b4a
|
refs/heads/master
| 2021-01-01T15:52:35.875726
| 2017-09-27T18:58:59
| 2017-09-27T18:58:59
| 97,719,256
| 0
| 0
|
Apache-2.0
| 2018-10-20T23:44:54
| 2017-07-19T13:28:21
|
HTML
|
UTF-8
|
Python
| false
| false
| 880
|
py
|
import os
import sys
import guild.cmd_support
import guild.db
import guild.op_util
def main(args):
    """Entry point: resolve the run from args and print the requested detail.

    args.details selects the view: "series", "files", or anything else for
    the one-line header.
    """
    run = guild.cmd_support.run_for_args(args)
    printers = {
        "series": _print_series,
        "files": _print_files,
    }
    printers.get(args.details, _print_header)(run)
def _print_header(run):
    """Write a single "<run-name>\t<status>" line for the run to stdout."""
    rundir = run.opdir
    name = os.path.basename(rundir)
    status = guild.op_util.extended_op_status(rundir)
    sys.stdout.write("%s\t%s\n" % (name, status))
def _print_series(run):
    """Print every series key recorded in the run's database, one per line."""
    db = guild.db.init_for_opdir(run.opdir)
    for series_key in db.series_keys():
        sys.stdout.write("%s\n" % series_key)
def _print_files(run):
cwd = os.path.abspath(".")
for root, _dirs, files in os.walk(run.opdir):
for f in files:
path = os.path.join(root, f)
print(os.path.relpath(path, cwd))
|
[
"g@rre.tt"
] |
g@rre.tt
|
bffb24fdd89319ceb3cdb0061dc12f8695ef8b9d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03157/s695062063.py
|
8c31e11022db6359dcc34abd1270b221bad01b66
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
from collections import deque
# AtCoder-style grid problem: for every connected "alternating" component
# (black '#' and white '.' cells connected through opposite-colour neighbours),
# accumulate black_count * white_count.
h, w = map(int, input().split())
# Pad the grid with a '-' sentinel border so neighbour checks never go out of range.
p = ['-' * (w + 2)]
for i in range(h):
    p.append('-' + input() + '-')
p.append('-' * (w + 2))
isl = []                                                  # (black, white) per component
v = [[0 for i in range(w + 2)] for j in range(h + 2)]     # visited flags
d = [[0, 1], [1, 0], [-1, 0], [0, -1]]                    # 4-neighbour offsets
def bfs(x, y):
    """Flood-fill the component containing (x, y), expanding only across
    opposite-colour edges, and append its (black, white) counts to isl.

    No-op if (x, y) was already visited.
    """
    if v[x][y] != 0:
        return
    q = deque()
    q.append((x, y))
    v[x][y] = 1
    br, wh = 0, 0
    # NOTE: removed an unused `cnt = 0` and dead debug prints from the original.
    while len(q) > 0:
        ch, cw = q.popleft()
        if p[ch][cw] == '#':
            br += 1
            for dh, dw in d:
                if p[ch + dh][cw + dw] == '.' and v[ch + dh][cw + dw] == 0:
                    q.append((ch + dh, cw + dw))
                    v[ch + dh][cw + dw] = 1
        elif p[ch][cw] == '.':
            wh += 1
            for dh, dw in d:
                if p[ch + dh][cw + dw] == '#' and v[ch + dh][cw + dw] == 0:
                    q.append((ch + dh, cw + dw))
                    v[ch + dh][cw + dw] = 1
    isl.append((br, wh))
for i in range(1, h + 1):
    for j in range(1, w + 1):
        bfs(i, j)
ans = 0
for br, wh in isl:
    ans += br * wh
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
dad6dabb0c0bf27caff808349deda0c12cdca566
|
21bf726bf895569a41a8b8d2db6772dc51f46cfd
|
/MachineLearning/machine_learning_examples/unsupervised_class/hcluster.py
|
4f40738d7ea106f838ee6efa0df6eb069eda4234
|
[] |
no_license
|
jeffsnguyen/Python-1
|
dd924d25337cd6ac21e321d7b2c5ac17c065d94b
|
463d32a61a760d076656c73c9f8c9fadf262438d
|
refs/heads/master
| 2022-03-23T09:50:04.476094
| 2019-12-23T12:32:49
| 2019-12-23T12:32:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,161
|
py
|
# https://deeplearningcourses.com/c/cluster-analysis-unsupervised-machine-learning-python
# https://www.udemy.com/cluster-analysis-unsupervised-machine-learning-python
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
def main():
    """Generate three Gaussian clusters and show Ward/single/complete
    hierarchical-clustering dendrograms for them.

    BUGFIX: the original used a Python 2 print statement
    (`print "Z.shape:", Z.shape`), which is a SyntaxError under Python 3.
    """
    D = 2 # so we can visualize it more easily
    s = 4 # separation so we can control how far apart the means are
    mu1 = np.array([0, 0])
    mu2 = np.array([s, s])
    mu3 = np.array([0, s])
    N = 900 # number of samples
    X = np.zeros((N, D))
    X[:300, :] = np.random.randn(300, D) + mu1
    X[300:600, :] = np.random.randn(300, D) + mu2
    X[600:, :] = np.random.randn(300, D) + mu3
    Z = linkage(X, 'ward')
    print("Z.shape:", Z.shape)
    # Z has the format [idx1, idx2, dist, sample_count]
    # therefore, its size will be (N-1, 4)
    plt.title("Ward")
    dendrogram(Z)
    plt.show()
    Z = linkage(X, 'single')
    plt.title("Single")
    dendrogram(Z)
    plt.show()
    Z = linkage(X, 'complete')
    plt.title("Complete")
    dendrogram(Z)
    plt.show()
if __name__ == '__main__':
    main()
|
[
"jerryxyx@163.com"
] |
jerryxyx@163.com
|
0b6e69480371618c0daeb8640584b5d89d5114f4
|
9218fe2f12a3f8209a71a7775178a084da4212c0
|
/crawler/dspider/spiders/stockFinancialDisclosureTimeSpider.py
|
be04e6013553e5a05b5bfff82e3a794c736eec91
|
[] |
no_license
|
betterManzZ/smart_deal_tool
|
678e7f7ecf431df4fb6cef5faf9c5c1ddd397697
|
a74cbab04393d60dc829c0110a98c625ba896f22
|
refs/heads/master
| 2020-06-12T03:31:29.404228
| 2019-06-23T08:05:34
| 2019-06-23T08:05:34
| 194,182,470
| 2
| 0
| null | 2019-06-28T00:57:13
| 2019-06-28T00:57:12
| null |
UTF-8
|
Python
| false
| false
| 7,762
|
py
|
# -*- coding: utf-8 -*-
import os
import re
import time
import datetime
import const as ct
import pandas as pd
from datetime import datetime
from scrapy import FormRequest
from scrapy.http import TextResponse, HtmlResponse
from pyquery import PyQuery as pq
from dspider.myspider import BasicSpider
from urllib.request import urlopen, Request
from base.cdate import report_date_list_with, one_report_date_list
from dspider.straight_flush import StraightFlushSession
from dspider.items import StockFinancialDisclosureTimeItem
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
class StockFinancialDisclosureTimeSpider(BasicSpider):
    """Scrape scheduled financial-report disclosure dates from data.10jqka.com.cn.

    For each report date the spider pages through the "yypl" listing, collects
    (stock code, first scheduled date, changed date, actual date) rows, and
    writes one CSV per report date under
    ct.STOCK_FINANCIAL_REPORT_ANNOUNCEMENT_DATE_PATH.
    """
    name = 'stockFinancialDisclosureTimeSpider'
    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'COOKIES_ENABLED': True,
        'RETRY_ENABLED': False,
        'REFERER_ENABLED': False,
        'SPIDERMON_ENABLED': False,
        'DOWNLOAD_DELAY': 5,
        'DOWNLOAD_TIMEOUT': 20.0,
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'CONCURRENT_REQUESTS_PER_IP': 1,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
        'DOWNLOADER_MIDDLEWARES': {
            'dspider.proxy.RandomProxy':100,
            'dspider.user_agent.RandomUserAgent': 200,
            'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None
        },
        'USER_AGENTS': ct.USER_AGENTS,
        'SIGNEWNYM_RATE': 60, # new ip rate, minimal value is 60 (seconds)
        'PROXY_HOST': 'http://ip_proxy-container:5010',
        'NEW_IP_HTTP_CODES': [500, 502, 503, 504, 522, 524, 408, 429, 403, 407, 404]
    }
    # report date -> accumulated [code, first, changed, actual] rows
    data_dict = dict()
    #date_list = report_date_list_with()
    date_list = one_report_date_list(datetime.now().strftime('%Y-%m-%d'))
    # Session used to compute the site's anti-crawler "v" cookie.
    sfsession = StraightFlushSession()
    allowed_domains = ['data.10jqka.com.cn', 's.thsi.cn']
    start_urls = ['https://s.thsi.cn/js/chameleon/time.{}.js', 'http://data.10jqka.com.cn/financial/yypl/date/{}/board/ALL/field/stockcode/order/DESC/page/{}/ajax/1/']
    repatten = 'http://data.10jqka.com.cn/financial/yypl/date/(.+?)/board/ALL/field/stockcode/order/DESC/page/(.+?)/ajax/1/'
    headers = {"Accept-Language": "en-US,en;q=0.5","Connection": "keep-alive"}
    def start_requests(self):
        """Kick off crawling with page 1 of the most recent report date."""
        if len(self.date_list) > 0:
            while not self.update_cookie(): time.sleep(3)
            mdate = self.date_list.pop()
            self.data_dict[mdate] = list()
            mcookie = {"v": self.sfsession.encode()}
            page_url = self.start_urls[1].format(mdate, 1)
            self.logger.info("start_request:%s", page_url)
            yield FormRequest(url = page_url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item)
    def parse_item(self, response):
        """Parse one listing page, then follow the next page or the next date."""
        try:
            url = response.url
            self.update_cookie()
            mcookie = {"v": self.sfsession.encode()}
            if type(response) is TextResponse:
                # Non-HTML (anti-crawler) response: back off and retry the same URL.
                time.sleep(60)
                print("parse_item3", response.url)
                yield FormRequest(url = url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback=self.errback_httpbin, dont_filter=True)
            else:
                reg = re.compile(self.repatten)
                if reg.search(url) is not None:
                    doc = pq(response.text)
                    max_page = self.get_max_page(doc)
                    cur_date, cur_page = reg.search(url).groups()
                    cur_page = int(cur_page)
                    if not self.update_data(doc, cur_date): print("empty url", url)
                    if cur_page < max_page:
                        cur_page += 1
                        page_url = self.start_urls[1].format(cur_date, cur_page)
                        print("parse_item1", page_url)
                        yield FormRequest(url = page_url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback=self.errback_httpbin)
                    else:
                        # Last page of this date: flush to CSV and move to the next date.
                        self.store_items(cur_date)
                        if len(self.date_list) > 0:
                            mdate = self.date_list.pop()
                            self.data_dict[mdate] = list()
                            page_url = self.start_urls[1].format(mdate, 1)
                            print("parse_item2", page_url)
                            yield FormRequest(url = page_url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback=self.errback_httpbin)
                else:
                    print("parse_item4", url)
                    yield FormRequest(url = url, headers = self.headers, cookies = mcookie, method = 'GET', callback = self.parse_item, errback = self.errback_httpbin, dont_filter = True)
        except Exception as e:
            # BUGFIX: was a bare `except:` that then referenced an undefined
            # name `e`, raising NameError whenever an error was actually caught.
            print("parse_item exception", e)
    def errback_httpbin(self, failure):
        """Log request failures by category."""
        # BUGFIX: HttpError / DNSLookupError / TimeoutError were referenced but
        # never imported, so every failure.check() call raised NameError.
        # Imported locally to keep module import lightweight.
        from scrapy.spidermiddlewares.httperror import HttpError
        from twisted.internet.error import DNSLookupError, TimeoutError
        print("errback", repr(failure))
        if failure.check(HttpError):
            response = failure.value.response
            print('HttpError on %s', response.url)
        elif failure.check(DNSLookupError):
            request = failure.request
            print('DNSLookupError on %s', request.url)
        elif failure.check(TimeoutError):
            request = failure.request
            print('TimeoutError on %s', request.url)
        else:
            request = failure.request
            print('Other Error on %s', request.url)
    def store_items(self, cur_date):
        """Flush the accumulated rows for cur_date to a per-date CSV file."""
        df = pd.DataFrame(self.data_dict[cur_date], columns=["code", "first", "change", "actual"])
        df = df.sort_values(['code'], ascending = 1)
        filepath = os.path.join(ct.STOCK_FINANCIAL_REPORT_ANNOUNCEMENT_DATE_PATH, "%s.csv" % cur_date)
        df.to_csv(filepath, index=False, mode="w", encoding='utf8')
        self.data_dict[cur_date].clear()
    def update_cookie(self):
        """Refresh the StraightFlush session clock from the site's time.js.

        Returns True on success, a falsy value otherwise.
        """
        self.sfsession = StraightFlushSession()
        time_stamp = int(time.time())
        time_url = self.start_urls[0].format(int(time_stamp/1200))
        request = Request(time_url)
        request.add_header("Connection", "close")
        request.add_header("Accept-Language", "en-US,en;q=0.5")
        request.add_header("User-Agent", 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36')
        try:
            response = urlopen(request, timeout=50)
            if response.status == 200:
                server_time = float(response.read().decode("utf-8").split('=')[1].split(';')[0])
                self.sfsession.update_server_time(server_time)
                return True
        except Exception as e:
            print("update_cookie", e)
            return False
    def get_max_page(self, doc):
        """Return the last page number parsed from the pager widget."""
        span_text = doc("div.m-page.J-ajax-page span").text()
        last_page = span_text.split("/")
        max_page = int(last_page[1])
        return max_page
    def update_data(self, doc, cur_date):
        """Append the rows of one listing page to data_dict[cur_date].

        Returns False when the page contains no table rows.
        """
        tr_node = doc("table tbody tr")
        if tr_node.length == 0: return False
        for tr in tr_node.items():
            code = tr.children("td").eq(1).text().strip(' ')     # stock code
            first = tr.children("td").eq(3).text().strip(' ')    # first scheduled date
            changed = tr.children("td").eq(4).text().strip(' ')  # changed date
            actual = tr.children("td").eq(5).text().strip(' ')   # actual disclosure date
            first = first.replace("-", "").replace("00000000", "")
            changed = changed.replace("-", "")
            actual = actual.replace("-", "").replace("00000000", "")
            self.data_dict[cur_date].append([code, first, changed, actual])
        return True
|
[
"hellobiek@gmail.com"
] |
hellobiek@gmail.com
|
f4be8acda03bcb0ffbd7f9c766fc0c1947499472
|
7370127fe73970fdf0882f0696c1dbbf1e818745
|
/pds-queries/2020-spring-census/list-unresponsive.py
|
8fe4b8cd3cd420681dcd508b13497b717197e856
|
[] |
no_license
|
epiphany40223/epiphany
|
ab5ef0590ac67d2e353592c45177b8e5f7e22457
|
32956e735f0c5e3fc9231449796431d23b4817f0
|
refs/heads/main
| 2023-09-01T05:12:17.013064
| 2023-08-27T19:57:16
| 2023-08-27T19:57:16
| 41,978,574
| 5
| 11
| null | 2023-09-11T02:00:09
| 2015-09-05T23:06:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,765
|
py
|
#!/usr/bin/env python3
# Basic script to create a list of families which have not responded to the
# 2020 spring census. This version is based on a CSV file import, so you will
# need to retrieve the latest file.
import sys
sys.path.insert(0, '../../python')
import csv
import ECC
import PDSChurch
import helpers
from constants import jotform_member_fields
from pprint import pprint
from pprint import pformat
#############################################################################
def read_jotform_results(filename, log):
    """Return a view of the unique Family IDs (fids) found in the Jotform CSV."""
    log.info("Reading results spreadsheet...")
    seen = dict()
    with open(filename, encoding='utf-8') as csvfile:
        for row in csv.DictReader(csvfile):
            seen[int(row['fid'])] = True
    log.info(f"Found {len(seen.keys())} unique Familes in the Jotform results")
    return seen.keys()
#############################################################################
# Of the families in the PDS database, find the ones with:
# - a spouse with a valid email address, or
# - a head of household with a valid email address, or
# - a Famile with a valid email address
def find_pds_census_families(log):
    """Load the PDS database and return (families_with_email, family_only_emails).

    families_with_email: list of Family dicts where a HoH/Spouse or the Family
    record itself has an email address.
    family_only_emails: fid -> Family dict for Families whose only email is on
    the Family record (not on a Member).
    """
    log.info("Loading PDS database...")
    # Load the PDS database
    (pds, families,
     members) = PDSChurch.load_families_and_members(filename='pdschurch.sqlite3',
                                                    log=log)
    # Search for Families that match the desired criteria
    # Do it in FID order, just for repeatability
    output_families = list()
    family_only_emails = dict()
    fids = sorted(families)
    for fid in fids:
        f = families[fid]
        # We skipped some Families with too many Members
        if len(f['members']) > len(jotform_member_fields):
            log.debug(f"--- Skipping Familiy {f['Name']} because they have too many Members")
            continue
        have_email = False
        for m in f['members']:
            if helpers.member_is_hoh_or_spouse(m):
                em = PDSChurch.find_any_email(m)
                if em:
                    have_email = True
                    break
        # If we have no email, check the Family record itself for an email
        if not have_email:
            em = PDSChurch.find_any_email(f)
            # BUGFIX: was `if f:`, which is always truthy -- every no-email
            # Family was logged/recorded as having a Family-only email.
            # Test the email that was actually found.
            if em:
                # Sadness. This looks like a bug in make-and-send-emails.py :-(
                #have_email = True
                log.info(f"Family-only email: {f['Name']} / fid {fid} / env {f['ParKey']}")
                family_only_emails[fid] = f
        # We have no email for the Family. Get phone numbers.
        if not have_email:
            log.debug(f"--- Have no HoH/Spouse/Family emails for Family {f['Name']} -- skipping")
            continue
        log.debug(f"+++ Family {f['Name']} has an email address")
        output_families.append(f)
    l = len(output_families)
    log.info(f"Found {l} PDS Families with emails")
    l = len(family_only_emails)
    log.info(F"Found {l} PDS Familes with Family-only email")
    return output_families, family_only_emails
#############################################################################
def check_families_only_email_results(families_only_email, fids_replied, log):
    """Log each Family that had only a Family-level email yet still replied."""
    responders = [fid for fid in families_only_email if fid in fids_replied]
    for fid in responders:
        log.info(f"Happy day! Family-only email FID {fid} has Jotform results!")
#############################################################################
def cross_reference(families_with_email, fids_replied, log):
    """Return the envelope IDs of emailed Families that did NOT reply."""
    missing_env_ids = []
    missing_fids = []
    for fam in families_with_email:
        fid = fam['FamRecNum']
        if fid in fids_replied:
            log.debug(f"Family did reply: {fam['Name']} ({fid} / {fam['FamRecNum']})")
            continue
        log.debug(f"Family did NOT reply: {fam['Name']} ({fid} / {fam['FamRecNum']})")
        missing_env_ids.append(fam['ParKey'].strip())
        missing_fids.append(fid)
    # JMS DOUBLE CHECK: no non-replier should also appear in the replied set.
    for fid in missing_fids:
        if fid in fids_replied:
            log.error(f"ERROR: Found double FID! {fid}")
    return missing_env_ids
#############################################################################
def write_output_files(not_replied_envelope_ids, filename_base, num_per_file, log):
    """Write the envelope IDs into numbered files of at most num_per_file each.

    Files are named f'{filename_base}.N.txt' (N starting at 1) and contain a
    single comma-separated line of envelope IDs.
    """
    ids = not_replied_envelope_ids.copy()
    file_number = 1
    while len(ids) > 0:
        ids_to_write = ids[:num_per_file]
        if len(ids_to_write) <= 0:
            break
        filename = f'{filename_base}.{file_number}.txt'
        with open(filename, 'w') as f:
            f.write(','.join(ids_to_write) + '\n')
        l = len(ids_to_write)
        # BUGFIX: the log line previously printed the literal "(unknown)"
        # instead of the file that was actually written.
        log.info(f"Wrote {l} envelope IDs to {filename}")
        ids = ids[num_per_file:]
        file_number += 1
#############################################################################
def main():
    """Top-level driver: cross-reference Jotform replies against PDS Families
    with emails and write the unresponsive envelope-ID files."""
    log = ECC.setup_logging(debug=False)
    # Read in the Jotform results
    filename = 'ECC census update - Sheet1.csv'
    fids_replied = read_jotform_results(filename, log)
    # Read in PDS Families with emails
    families_with_email, families_only_email = find_pds_census_families(log)
    # Check for Family-only emails in the results
    check_families_only_email_results(families_only_email, fids_replied, log)
    # Cross reference the two lists and see what PDS Families with emails
    # did not respond to the census
    not_replied_envelope_ids = cross_reference(families_with_email, fids_replied, log)
    # Write output files
    filename_base = 'unresponsives'
    write_output_files(not_replied_envelope_ids, filename_base, 100, log)
# Guard so importing this module for its helpers does not run the report.
if __name__ == '__main__':
    main()
|
[
"jeff@squyres.com"
] |
jeff@squyres.com
|
7f5067f65e16a598942794e6419451da5869da52
|
c65af972b843e4f11a9aa9005104ac54a283032d
|
/practice2/second.py
|
2dc4335141cb0ae8f34bc1f2bca7306385900aba
|
[] |
no_license
|
ljeleven/mypython
|
a63438c4246606082f000967a5d47256fa297aeb
|
b652338be3937543f0b35a9111dd0d346eb913b5
|
refs/heads/master
| 2023-05-24T19:30:37.001198
| 2020-04-09T15:40:40
| 2020-04-09T15:40:40
| 240,815,098
| 0
| 0
| null | 2023-05-22T22:41:00
| 2020-02-16T01:46:29
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
#__author:"longjin"
#date: 2019/6/9
def bonus(n):
    """Compute the tiered bonus for a profit of n yuan.

    Rates by bracket: 10% up to 100k, 7.5% for 100k-200k, 5% for 200k-400k,
    3% for 400k-600k, 1.5% for 600k-1M, and 1% above 1M.
    """
    bonus = 0
    if n <= 100000:
        bonus = n*0.1
    elif 100000 < n <= 200000:
        bonus = 100000*0.1 + (n-100000)*0.075
    elif 200000 < n <= 400000:
        bonus = 100000*0.1 + 100000*0.075 + (n-200000)*0.05
    elif 400000 < n <= 600000:
        bonus = 100000 * 0.1 + 100000 * 0.075 + 200000 * 0.05 + (n-400000)*0.03
    elif 600000 < n <= 1000000:
        bonus = 100000 * 0.1 + 100000 * 0.075 + 200000 * 0.05 + 200000 * 0.03 + (n-600000)*0.015
    else:
        # BUGFIX: the original condition was `10000000 < n` (extra zero), so
        # profits in (1,000,000 .. 10,000,000] fell through and got bonus 0.
        bonus = 100000 * 0.1 + 100000 * 0.075 + 200000 * 0.05 + 200000*0.03 + 400000*0.015 + (n-1000000)*0.01
    return bonus
# Simple CLI driver: read the annual profit from stdin and print the bonus.
n = int(input('please input your profit: '))
print(bonus(n))
|
[
"ljeleven@foxmail.com"
] |
ljeleven@foxmail.com
|
abc96381576d91e73fd9c07972847fb15b2ae392
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/comparison/test_ignore_error05.py
|
63452115f9c827a96269ffee2d9f92da2fd2a24a
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336
| 2023-07-08T16:54:37
| 2023-07-08T16:54:37
| 353,636,960
| 0
| 0
|
NOASSERTION
| 2021-04-01T08:57:21
| 2021-04-01T08:57:20
| null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Reference workbook to compare against; the listed XML parts differ
        # between writers without affecting the feature under test.
        self.set_filename('ignore_error05.xlsx')
        self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # A number stored as text plus a deliberate #DIV/0! formula result,
        # with both of Excel's green-triangle warnings suppressed.
        worksheet.write_string('A1', '123')
        worksheet.write_formula('A2', '=1/0', None, '#DIV/0!')
        worksheet.ignore_errors({'number_stored_as_text': 'A1', 'eval_error': 'A2'})
        workbook.close()
        self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
c25b620107cf61d98745b98607852eb71b1016f7
|
bf9a77bd51ba2dd5bf9c6e7cbf0ec9ec403f0b4f
|
/tests/test_lib_wordpress.py
|
71bdb09f5d3ff37d3d02219681a284be863e3876
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
s672/clld
|
bf3e53698ef19b71181ca8e837b863d8ea423afe
|
cce7abeb504e0e29b61e7d14e93a1dc1d2294a3b
|
refs/heads/master
| 2023-03-30T06:22:06.732159
| 2021-04-07T06:57:20
| 2021-04-07T06:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
from clld.lib.wordpress import *
def _requests(mocker, c, status=200):
return mocker.Mock(get=lambda *a, **kw: mocker.Mock(text=c, status_code=status))
def test_sluggify():
    # sluggify lower-cases and joins words with hyphens.
    assert sluggify('a and B') == 'a-and-b'
def test_Client(mocker):
    # Smoke-test clld.lib.wordpress.Client against a patched `requests` module
    # and a mocked XML-RPC server -- no real network traffic is performed.
    client = Client('blog.example.org', 'user', 'password')
    # 404 response: post id cannot be resolved from the path.
    mocker.patch('clld.lib.wordpress.requests', _requests(mocker, '', status=404))
    client.get_post_id_from_path('/post')
    # Post id found via the post div's element id.
    mocker.patch('clld.lib.wordpress.requests', _requests(mocker, '<div class="post" id="post-1">'))
    client.get_post_id_from_path('/post')
    # Post id found via the comment form's hidden field.
    mocker.patch(
        'clld.lib.wordpress.requests',
        _requests(mocker, '<input type="hidden" name="comment_post_ID" value="1" />'))
    client.get_post_id_from_path('/post')
    # XML-RPC calls are exercised against a fully mocked server.
    client.server = mocker.MagicMock()
    client.set_categories([{'name': 'cat', 'description': 'desc'}])
    client.set_categories([{'name': 'cat', 'description': 'desc'}], post_id=3)
    client.create_post(
        'title', 'content',
        date=1,
        tags=['tag'],
        custom_fields={'a': 'x'},
        categories=[{'name': 'cat', 'description': 'desc'}])
    client.server = mocker.MagicMock(wp=mocker.Mock(getCategories=mocker.Mock(return_value=[{
        'categoryName': 'n', 'categoryId': '1'}])))
    client.get_categories()
    client.get_categories(name='n')
    client.set_categories([{'name': 'n', 'description': 'desc'}])
|
[
"xrotwang@googlemail.com"
] |
xrotwang@googlemail.com
|
ff1b51789b5b92e740b1589c7ae516bcf3bfc011
|
a342b1d6c7451cf3982b835dfc81924efe0509b4
|
/tests/fixpath.py
|
6e87fe37d75741be878ccbd0cf94c64ffdc6390d
|
[] |
no_license
|
phaustin/eoas_canvas
|
23bbc27a99f5d0654dce13be3fc3cbcc022d9250
|
79544df2095c7e536f35c29bbd9f568a0ff3633c
|
refs/heads/master
| 2021-04-03T05:12:50.232463
| 2019-03-01T01:21:31
| 2019-03-01T01:21:31
| 124,586,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
"""
import this in your main function to add the parent
to the folder holding the script to the front of sys.path
"""
from pathlib import Path
import sys, os
import site
the_path=Path(sys.argv[0]).resolve()
print(f'fixpath: inserting package directory in path: {the_path}')
the_path=the_path.parents[1]
sys.path.insert(0, str(the_path))
site.removeduppaths()
|
[
"paustin@eos.ubc.ca"
] |
paustin@eos.ubc.ca
|
5e52e7f42b5a8b03de3b12096468a57d982485b9
|
1cfb54adac19bfd69cc58ab23918925a800494c4
|
/youtube_project/youtube_app/urls.py
|
5cce935a401be7be76fbf852834bf410a85a1441
|
[] |
no_license
|
SatishNitk/youtube_clone
|
f743bae04190d4a3c0881a2a1b3daf23d9d9e468
|
698c94d5ef9689428da6a35b01928fd071978772
|
refs/heads/master
| 2020-06-18T20:08:56.113547
| 2019-07-28T04:56:52
| 2019-07-28T04:56:52
| 196,431,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
from django.urls import path,include
from youtube_app.views import *
# Route table for youtube_app. View classes come from the module's
# star-import of youtube_app.views.
urlpatterns = [
    path('', HomeView.as_view()),
    path('login/',LoginView.as_view()),
    path('register/',RegisterView.as_view()),
    path('video/',NewVideo.as_view()),
    path('logout/',LogoutView.as_view()),
    path('comment/',CommentView.as_view()),
    path('video/<int:id>', VideoView.as_view()),
    path('get_video/<file_name>', VideoFileView.as_view()),
]
|
[
"satishkrgu95@gmail.com"
] |
satishkrgu95@gmail.com
|
a8567ccd2b5a4624126ca8ab8456180bbdc05fc2
|
64cee8c8f33ae6be8edf0daa7a3a83efee86c82c
|
/doc/source/conf.py
|
654ff054fca0e5c2a439fd8b5ba07d4f6f22ccd0
|
[
"MIT"
] |
permissive
|
shengyongniu/cemba_data
|
52881061dac63c5dca4bbedf9bc7f1f345b13575
|
6d076ed7f19ac76650d91fe9172393cc6c10e686
|
refs/heads/master
| 2021-10-09T14:31:43.849987
| 2018-12-29T23:19:53
| 2018-12-29T23:19:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,946
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the cemba_data package importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath('../../cemba_data'))
# -- Project information -----------------------------------------------------
project = 'cemba_data'
copyright = '2018, Hanqing Liu'
author = 'Hanqing Liu'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cemba_datadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'cemba_data.tex', 'cemba\\_data Documentation',
     'Hanqing Liu', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cemba_data', 'cemba_data Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'cemba_data', 'cemba_data Documentation',
     author, 'cemba_data', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
[
"liuhq@ucsd.edu"
] |
liuhq@ucsd.edu
|
6d9f788e635eba1977d3eee0524bb2eada7ec450
|
5f1c3a2930b20c3847496a249692dc8d98f87eee
|
/Pandas/Excel_DataAnalysis/Question5.py
|
272e1db4934d35b2c84d146a8f2e7f6ac7cee2c9
|
[] |
no_license
|
AmbyMbayi/CODE_py
|
c572e10673ba437d06ec0f2ae16022d7cbe21d1c
|
5369abf21a8db1b54a5be6cbd49432c7d7775687
|
refs/heads/master
| 2020-04-24T05:01:46.277759
| 2019-02-22T08:26:04
| 2019-02-22T08:26:04
| 171,723,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
"""Write a pandas program to insert a column in the sixth position of the said excel sheet and fill it with NaN values
"""
import pandas as pd
import numpy as np
df = pd.read_excel('coalpublic2013.xls')
df.insert(3, "column1", np.nan)
print(df.head)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
315888a36f3671ad0377b488fbeba896827df303
|
09fd456a6552f42c124c148978289fae1af2d5c3
|
/Graph/210.py
|
b37dc35636fc78cbaea209ecf36234d07c89537d
|
[] |
no_license
|
hoang-ng/LeetCode
|
60b4e68cbcf54cbe763d1f98a70f52e628ab32fb
|
5407c6d858bfa43325363503c31134e560522be3
|
refs/heads/master
| 2021-04-10T11:34:35.310374
| 2020-07-28T10:22:05
| 2020-07-28T10:22:05
| 248,932,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,123
|
py
|
# 210. Course Schedule II
# There are a total of n courses you have to take, labeled from 0 to n-1.
# Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
# Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
# There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
# Example 1:
# Input: 2, [[1,0]]
# Output: [0,1]
# Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
# course 0. So the correct course order is [0,1] .
# Example 2:
# Input: 4, [[1,0],[2,0],[3,1],[3,2]]
# Output: [0,1,2,3] or [0,2,1,3]
# Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
# courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
# So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
# Note:
# The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
import collections
class Solution:
    def findOrder(self, numCourses, prerequisites):
        """Return one valid order in which all courses can be taken, or
        [] when the prerequisite graph contains a cycle.

        prerequisites is a list of [course, prereq] pairs.
        """
        # graph[c] lists the courses that must be finished before c.
        graph = collections.defaultdict(list)
        for course, before in prerequisites:
            graph[course].append(before)

        res = []
        # 0 = unvisited, -1 = on the current DFS path, 1 = fully emitted.
        visited = [0] * numCourses
        if all(self.dfs(c, graph, visited, res) for c in range(numCourses)):
            return res
        return []

    def dfs(self, node, graph, visited, res):
        """Post-order DFS; returns False as soon as a cycle is detected."""
        if visited[node] == -1:     # back edge: node is on the active path
            return False
        if visited[node] == 1:      # already placed in the result
            return True
        visited[node] = -1
        if not all(self.dfs(nxt, graph, visited, res) for nxt in graph[node]):
            return False
        visited[node] = 1
        res.append(node)            # all prerequisites emitted first
        return True
|
[
"hoang2109@gmail.com"
] |
hoang2109@gmail.com
|
ce55db8946daf7db06859e2c0e253fee52256393
|
039f2c747a9524daa1e45501ada5fb19bd5dd28f
|
/Typical DP Contest/TDPCa2.py
|
0e482b3d312d102ca64129efa5e8468a4aad6f79
|
[
"Unlicense"
] |
permissive
|
yuto-moriizumi/AtCoder
|
86dbb4f98fea627c68b5391bf0cc25bcce556b88
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
refs/heads/master
| 2023-03-25T08:10:31.738457
| 2021-03-23T08:48:01
| 2021-03-23T08:48:01
| 242,283,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
n = int(input())
p = list(map(int, input().split()))

# Bottom-up subset-sum table: dp[i][v] is True when some subset of the
# first i problem scores sums to exactly v.
total = sum(p)
dp = [[False] * (total + 1) for _ in range(n + 1)]
dp[0][0] = True
for i in range(1, n + 1):
    score = p[i - 1]
    for v in range(total + 1):
        dp[i][v] = dp[i - 1][v] or (v >= score and dp[i - 1][v - score])

# Count the distinct totals that are achievable with all n problems.
ans = sum(1 for v in range(total + 1) if dp[n][v])
print(ans)
|
[
"kurvan1112@gmail.com"
] |
kurvan1112@gmail.com
|
902bff8182253dc5452804b38313cae134a0b77a
|
e24cdd3433911fb9e7193de488811c80d5b97746
|
/ByTags/Design/362. Design Hit Counter.py
|
b74743e6ef81c7fa6ee85683eb7f02fb3da89e51
|
[] |
no_license
|
lynkeib/LeetCode
|
753f6a07270d956ca802632edfb0480029fe6f51
|
8a82905d40b882b20a9b6f862942f8f3e4bebcf0
|
refs/heads/master
| 2021-06-16T11:33:17.830068
| 2021-02-18T07:17:20
| 2021-02-18T07:17:20
| 165,439,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
class HitCounter(object):

    def __init__(self):
        """
        Initialize your data structure here.

        One (count, timestamp) slot per second of a rolling 300-second
        window; slot i is pre-labelled with second i+1 so the very first
        300 seconds reuse the slots without a special case.
        """
        self.counter = [[0, second] for second in range(1, 301)]

    def hit(self, timestamp):
        """
        Record a hit.
        @param timestamp - The current timestamp (in seconds granularity).
        :type timestamp: int
        :rtype: None
        """
        slot = self.counter[(timestamp - 1) % 300]
        if slot[1] == timestamp:
            slot[0] += 1                      # another hit in this second
        else:
            slot[0], slot[1] = 1, timestamp   # stale slot: restart count

    def getHits(self, timestamp):
        """
        Return the number of hits in the past 5 minutes.
        @param timestamp - The current timestamp (in seconds granularity).
        :type timestamp: int
        :rtype: int
        """
        return sum(hits for hits, when in self.counter
                   if timestamp - when < 300)
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# obj.hit(timestamp)
# param_2 = obj.getHits(timestamp)
|
[
"liuchengyin1234@126.com"
] |
liuchengyin1234@126.com
|
038a2afe4dbd87146aadaa7a5e7d1b80e3d07b78
|
0306bea08e9aab18f34a799ce8a73e86921f90f7
|
/medium/EvaluareReversePolishNotation.py
|
f4166dc5308c8152784f3331a6b0a9e6cfd0e66d
|
[] |
no_license
|
GeorgianBadita/LeetCode
|
78686fde88ef65b64f84fb7c2a22ba37ef21b8d9
|
e3b0571182369c5308e0c29fb87106bb0b0d615a
|
refs/heads/master
| 2022-10-21T00:23:26.479943
| 2022-10-14T20:27:27
| 2022-10-14T20:27:27
| 251,733,951
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
# https://leetcode.com/explore/interview/card/top-interview-questions-medium/114/others/823/
from typing import List
class Solution:
    def __init__(self) -> None:
        # Single-character operator tokens recognised by the evaluator.
        self.__operators = "+-*/"

    def apply_operator(self, op1, op, op2):
        """Apply binary operator *op* to (op1, op2); division truncates
        toward zero, as the RPN problem requires."""
        if op == '+':
            return op1 + op2
        elif op == '-':
            return op1 - op2
        elif op == '*':
            return op1 * op2
        elif op == '/':
            return int(op1 / op2)

    def evalRPN(self, tokens: List[str]) -> int:
        """Evaluate a Reverse Polish Notation expression; 0 for no tokens."""
        if not tokens:
            return 0
        stack = []
        for tok in tokens:
            if tok in self.__operators:
                right = stack.pop()
                left = stack.pop()
                stack.append(self.apply_operator(left, tok, right))
            else:
                stack.append(int(tok))   # multi-char ints (e.g. "-11") land here
        return stack.pop()
print(Solution().evalRPN(["10", "6", "9", "3", "+",
"-11", "*", "/", "*", "17", "+", "5", "+"]))
|
[
"geo.badita@gmail.com"
] |
geo.badita@gmail.com
|
28140476e361402bc2865261bdff072d090b730d
|
b5cba88ce8c86740c8c3453134610fd5bafbb8c4
|
/Leetcode/17. Letter Combinations of a Phone Number/solution.py
|
d023f235fd394de279c5c4e274ffb3348e3e229f
|
[] |
no_license
|
EduardoSantos7/Algorithms4fun
|
55fcf9d515ea3b70b93298ac96a58d2ae68dee11
|
6ff182ed596b6322322b087f29e6ad98baec3f97
|
refs/heads/master
| 2023-07-23T01:38:08.216313
| 2023-07-23T01:35:58
| 2023-07-23T01:35:58
| 227,448,848
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
class Solution:
    def letterCombinations(self, digits: str) -> List[str]:
        """Return every letter string the digit sequence 2-9 could spell
        on a phone keypad; [] for an empty input."""
        if not digits:
            return []

        keypad = {
            '2': ['a', 'b', 'c'],
            '3': ['d', 'e', 'f'],
            '4': ['g', 'h', 'i'],
            '5': ['j', 'k', 'l'],
            '6': ['m', 'n', 'o'],
            '7': ['p', 'q', 'r', 's'],
            '8': ['t', 'u', 'v'],
            '9': ['w', 'x', 'y', 'z'],
        }

        combos = []

        def build(prefix):
            # A prefix as long as the input is a finished combination.
            if prefix and len(prefix) == len(digits):
                combos.append(prefix)
                return
            # Extend with each letter of the next undecided digit.
            for ch in keypad[digits[len(prefix)]]:
                build(prefix + ch)

        build('')
        return combos
|
[
"eduardoluissd@gmail.com"
] |
eduardoluissd@gmail.com
|
550581cdb8b24a81f41dad30bf26ba3cb86a88a4
|
505963904ce8fedd73caf562ffe993feb98e1043
|
/home/urls.py
|
113b829e90992f2b6b7a4471dee27cb26be8ce71
|
[] |
no_license
|
madmax330/Jobin
|
05f0c3be31c1fce10d8df48047affd78c57c45ed
|
b06d04878ff9f4de1bf9d8cd64cd1c4322610d31
|
refs/heads/Develop
| 2020-12-08T22:22:44.283964
| 2018-05-30T07:23:27
| 2018-05-30T07:23:27
| 67,643,755
| 0
| 2
| null | 2018-05-30T07:12:30
| 2016-09-07T21:07:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
from django.conf.urls import url
from . import views
# Namespace used when reversing these routes, e.g. reverse('home:index').
app_name = 'home'
urlpatterns = [
    # Landing page.
    url(r'^$', views.index_view, name='index'),
    # Authentication: separate login flows for students and companies.
    url(r'^login/student/$', views.login_student, name='student_login'),
    url(r'^login/company/$', views.login_company, name='company_login'),
    url(r'^send/contact/message/$', views.send_contact_message, name='send_contact_message'),
    # Registration and e-mail activation (activation key in the URL).
    url(r'^register/student/$', views.register_student, name='register_student'),
    url(r'^register/company/$', views.register_company, name='register_company'),
    url(r'^verify/$', views.verify, name='verify'),
    url(r'^activate/company/(?P<key>.+)/$', views.activate_company, name='activate_company'),
    url(r'^activate/student/(?P<key>.+)/$', views.activate_student, name='activate_student'),
    url(r'^new-activation/$', views.new_verification, name='new_activation'),
    # Account maintenance; <ut> looks like a user-type discriminator -- confirm.
    url(r'^new/password/(?P<ut>\w+)/$', views.new_password_view, name='new_password'),
    url(r'^change/user/info/(?P<ut>\w+)/$', views.ChangeUserInfo.as_view(), name='change_info'),
    url(r'^logout/$', views.user_logout, name='logout'),
    # Static pages.
    url(r'^privacy-policy/$', views.privacy_policy, name='policy'),
    url(r'^terms-and-conditions/$', views.terms_and_conditions, name='terms'),
    # Test-content helpers -- presumably dev-only; verify they are not
    # reachable in production.
    url(r'^create/content/(?P<n>[0-9]+)/$', views.create_test_content, name='gen_content'),
    url(r'^clear/content/$', views.clear_test_content, name='clear_content'),
]
|
[
"madmax330@yahoo.com"
] |
madmax330@yahoo.com
|
8bd8704b983c3d96858f8d0d288946e59f30920a
|
10bfea81cdde6710c6abd4a4ef48a99112d286f8
|
/crm/models.py
|
7505c38e26858acf86545000549f77849846fab2
|
[] |
no_license
|
egAhmed/Django_KMA_Apps
|
f6b3971a5d2d08f91d7e6d2d76208db1e2877f4e
|
83a7491b8c5afe6f60ab78d9bdb826b783c80d08
|
refs/heads/master
| 2022-12-02T10:28:09.526611
| 2020-08-18T13:24:19
| 2020-08-18T13:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,911
|
py
|
from django.db import models
from django.urls import reverse
# from django.contrib.admin import widgets
# from datetime import datetime
# from django.utils import timezone
# from django.utils.timezone import now
# Create your models here.
class Registration(models.Model):
    """A basic account-signup record."""
    name = models.CharField(max_length=150)
    username = models.CharField(max_length=50)
    email = models.EmailField(blank=True, max_length=254)
    # NOTE(review): password is a plain CharField; confirm it is hashed
    # before save -- Django's built-in auth.User is usually preferable.
    password = models.CharField(max_length=50)
    bio = models.TextField(blank=True)
#this class for
# class Choices(models.Model):
# description = models.CharField(max_length=100)
class Clients(models.Model):
    """A client contact record (name, phones, address, optional photo/ID)."""
    # record = models.ForeignKey(RecordFirm, on_delete=models.CASCADE)
    name = models.CharField(max_length=200)
    phone = models.CharField(max_length=20, blank=True, null=True)
    mobile = models.CharField(max_length=20, blank=True, null=True)
    address = models.CharField(max_length=200, blank=True, null=True)
    photo = models.ImageField(upload_to='Clients_pics', blank=True)
    identityNo = models.CharField(max_length=200, blank=True, null=True)
    notes = models.CharField(max_length=2000, blank=True, null=True)
    def __str__(self):
        return self.name #+ " | " + str(self.photo)
    def get_absolute_url(self):
        # Edit page for this client; used for redirect-after-save.
        return reverse('crm:clients_update', kwargs={'id': self.id})
    def goto_home(self):
        return reverse('crm:home')
class RecordFirm(models.Model):
    """A visit/record entry for a client's firm, with currency choice and
    document flags (tax / part / purchase numbers)."""
    #now = timezone.now()
    Currency = (
        ('EGY', 'Egy Pound'),
        ('USD', 'US Dollar')
    )
    # Tax_Choice = ('taxno', 'Tax No.')
    # Part_Choice=('partno', 'Part No.')
    # Purchase_Choice=('purchaseno', 'Purchase No.')
    client_id = models.ForeignKey(Clients,
                                  on_delete=models.CASCADE,
                                  default=False,
                                  null=False)
    # NOTE(review): name='Company Name' gives the field a name containing a
    # space, which is not a valid Python identifier and breaks normal
    # attribute access -- confirm intent (verbose_name was probably meant).
    # Left unchanged here to avoid a schema migration.
    firm_name = models.CharField(max_length=200,
                                 blank=True,
                                 null=True,
                                 name='Company Name')  # name= 'Company Name'
    manager = models.CharField(max_length=200, blank=True, null=True)
    repres_name = models.CharField(max_length=200, blank=True, null=True)
    last_visit = models.DateField()
    notes = models.TextField()
    # Currency code for the record ('EGY' or 'USD').
    type = models.CharField(max_length=3, choices=Currency, null=True)
    #paper = models.ManyToManyField(Choices)
    tax_no = models.BooleanField(default=False)
    part_no = models.BooleanField(default=False)
    purchase_no = models.BooleanField(default=False)
    # client_id = models.
    def __str__(self):
        # Bug fix: __str__ must return a str; client_id is a related
        # Clients instance, so coerce it explicitly.
        return str(self.client_id)
    def get_url(self):
        return reverse('crm:firm_update', kwargs={'id': self.id})
    def go_home(self):
        return reverse('crm:regdata')  # , kwargs={'id': self.id}
|
[
"aboobida2002@gmail.com"
] |
aboobida2002@gmail.com
|
d1acfdd2bd0014aee0b1f83318cc3cd27a0d2093
|
db0b0935c069a877a7e583cc6cbbe006f3ea271d
|
/Section2/053.py
|
bc374f36e5ecb74356822b407f3291a39fd70eda
|
[] |
no_license
|
partrita/biopython
|
9294033c8809441190ea861e484b7678dbb2909a
|
a488559820980bd054b67395756e01cffa738965
|
refs/heads/master
| 2020-05-05T13:28:12.465422
| 2019-04-08T06:03:49
| 2019-04-08T06:03:49
| 180,078,766
| 4
| 1
| null | 2019-04-08T05:51:39
| 2019-04-08T05:51:39
| null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
#053.py
# Demonstrates sorted() (returns a new list) versus list.sort() (in place).
a = [3, 5, 2, 1, 4]
b = [8, 10, 7, 6, 9]

descending = sorted(a, reverse=True)   # new list; a itself is untouched
print("sorted(a)")
print(descending)
print("a")
print(a)
print("")

b.sort(reverse=True)                   # sorts b itself, descending
print("b.sort()")
print(b)
|
[
"partrita@gmail.com"
] |
partrita@gmail.com
|
861c5f70e7ecb76286b4a0b2647c81904eed9c51
|
44b2743ff70ce0631e9714ce78c44720fa63a9ad
|
/app/config/urls.py
|
00d26fff98d1af869b8364b221e013f52296ccf8
|
[
"MIT"
] |
permissive
|
hoelsner/product-database
|
1b1b4db8e968f5bc149605093e4639c48a9ae1ad
|
c649569fb82bc4b0a5e9ef9615fff8a364ce652f
|
refs/heads/master
| 2023-07-24T21:39:01.870692
| 2023-07-09T17:03:56
| 2023-07-09T17:03:56
| 43,767,455
| 43
| 27
|
MIT
| 2023-04-16T19:17:25
| 2015-10-06T17:44:50
|
Python
|
UTF-8
|
Python
| false
| false
| 683
|
py
|
"""
Product Database Config URL configuration (namespace "productdb_config")
"""
from django.conf.urls import url
from app.config import views
# namespace: productdb_config
urlpatterns = [
    # user views
    # Global settings, server status and cache maintenance.
    url(r'^change/$', views.change_configuration, name='change_settings'),
    url(r'^status/$', views.status, name='status'),
    url(r'^flush_cache/$', views.flush_cache, name='flush_cache'),
    # Server notification messages: list, add, and per-message detail.
    url(r'^messages/$', views.server_messages_list, name='notification-list'),
    url(r'^messages/add/$', views.add_notification, name='notification-add'),
    url(r'^messages/(?P<message_id>\d+)/$', views.server_message_detail, name='notification-detail'),
]
# Namespace used when reversing these routes, e.g. reverse('config:status').
app_name = "config"
|
[
"henry@codingnetworker.com"
] |
henry@codingnetworker.com
|
171a3cc32d02abfa9872ea2167a3e1182d2aae6a
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/consumption/latest/__init__.py
|
f3e9e2805ca06f72ddd2b242d2c80d0413d13a4a
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .budget import *
from .budget_by_resource_group_name import *
from .get_budget import *
from .get_budget_by_resource_group_name import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this module's resource types with the Pulumi runtime so
    resource URNs can be rehydrated into the proper Python classes.

    Imports are deferred to avoid import-time side effects/cycles.
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        # Version reported for every resource constructed by this module.
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Map the fully-qualified Pulumi type token to its class.
            if typ == "azure-native:consumption/latest:Budget":
                return Budget(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-native:consumption/latest:BudgetByResourceGroupName":
                return BudgetByResourceGroupName(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-native", "consumption/latest", _module_instance)


# Registration happens at import time, as generated SDK modules expect.
_register_module()
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
a709e84115ad3c609b6e6e41d13916caa6b916ed
|
6044b98e65c38233fb587b3fd40290a7be0b4c6d
|
/uni_ticket/migrations/0036_ticketreply_read.py
|
592ee30f86fc243801323666f87cac4ffb684a03
|
[
"Apache-2.0"
] |
permissive
|
libremente/uniTicket
|
f5bef4ff85edb03a799b5e87a49050becd1822fa
|
6f41f0ce9bd0f1238bffcde1c4e12a38266c781b
|
refs/heads/master
| 2022-04-23T05:21:39.528135
| 2020-04-22T16:11:29
| 2020-04-22T16:11:29
| 257,953,451
| 0
| 0
|
Apache-2.0
| 2020-04-22T16:11:01
| 2020-04-22T16:11:00
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# Generated by Django 2.2.3 on 2019-07-30 09:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the boolean ``read`` flag (default False) to TicketReply."""

    dependencies = [
        ('uni_ticket', '0035_auto_20190725_1632'),
    ]

    operations = [
        migrations.AddField(
            model_name='ticketreply',
            name='read',
            field=models.BooleanField(default=False),
        ),
    ]
|
[
"francesco.filicetti@unical.it"
] |
francesco.filicetti@unical.it
|
02ece8aff15fad1464676b60c95f4ee8493c447c
|
31a928cff4960236923b6bc3b68e34bb2f46f470
|
/ctc-executioner/setup.py
|
768b03ff56cb33d23f6cf759647c444e12ae35e2
|
[
"BSD-3-Clause"
] |
permissive
|
webclinic017/ml_monorepo
|
707df2afd2f986eb0721d26430e6135c917817c6
|
945f0a83d6b94282c547bb6f4805f3381ad9c16a
|
refs/heads/master
| 2021-10-19T21:02:53.322944
| 2019-02-19T20:58:51
| 2019-02-23T20:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from setuptools import setup

# NOTE(review): two setup() calls in one setup.py is unusual -- most build
# front ends honour a single distribution per file. Confirm both packages
# actually get built/installed, or split into one setup.py per package.
setup(name='gym_ctc_executioner',
      packages=['gym_ctc_executioner'],
      version='0.0.1',
      install_requires=['gym']
)
setup(name='gym_ctc_marketmaker',
      packages=['gym_ctc_marketmaker'],
      version='0.0.1',
      install_requires=['gym']
)
|
[
"tmichael.yu@gmail.com"
] |
tmichael.yu@gmail.com
|
e1be4b994675e0158566681530cd9169bb10ece9
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R1/benchmark/startPyquil447.py
|
b50d0c38f7773bb40682486d38291f1927f81bbc
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,898
|
py
|
# qubit number=2
# total number=83
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the benchmark circuit (auto-generated gate sequence over
    qubits 0-2; the '# number=' tags are the generator's gate ids)."""
    prog = Program() # circuit begin
    prog += H(0) # number=1
    prog += H(1) # number=70
    prog += RX(-0.09738937226128368,2) # number=2
    prog += H(1) # number=33
    prog += Y(2) # number=56
    prog += CZ(2,1) # number=34
    prog += H(1) # number=35
    prog += H(1) # number=3
    prog += H(0) # number=45
    prog += H(1) # number=77
    prog += CZ(2,1) # number=78
    prog += H(1) # number=79
    prog += CZ(1,0) # number=46
    prog += H(0) # number=47
    prog += Y(1) # number=15
    prog += H(0) # number=66
    prog += CZ(1,0) # number=67
    prog += H(0) # number=68
    prog += H(1) # number=19
    prog += CZ(0,1) # number=20
    prog += RX(-0.6000441968356504,1) # number=28
    prog += H(1) # number=21
    prog += H(1) # number=30
    prog += CZ(0,1) # number=31
    prog += H(1) # number=32
    prog += H(1) # number=57
    prog += CZ(0,1) # number=58
    prog += H(1) # number=59
    prog += CNOT(0,1) # number=51
    prog += CNOT(0,1) # number=71
    prog += X(1) # number=72
    prog += CNOT(0,1) # number=73
    prog += CNOT(0,1) # number=53
    prog += H(1) # number=80
    prog += CZ(0,1) # number=81
    prog += H(1) # number=82
    prog += Y(2) # number=69
    prog += H(2) # number=29
    prog += H(1) # number=36
    prog += X(1) # number=64
    prog += CZ(0,1) # number=37
    prog += Y(2) # number=44
    prog += H(1) # number=38
    prog += Z(1) # number=55
    prog += H(1) # number=61
    prog += CZ(0,1) # number=62
    prog += Z(2) # number=65
    prog += H(1) # number=63
    prog += Z(1) # number=11
    prog += RX(-1.1780972450961724,2) # number=54
    prog += H(1) # number=42
    prog += H(0) # number=39
    prog += CZ(1,0) # number=40
    prog += H(0) # number=41
    prog += CNOT(2,1) # number=26
    prog += Y(1) # number=14
    prog += CNOT(1,0) # number=5
    prog += CNOT(0,1) # number=74
    prog += X(1) # number=75
    prog += CNOT(0,1) # number=76
    prog += Z(1) # number=8
    prog += X(1) # number=7
    prog += H(2) # number=43
    prog += RX(-2.42845112122491,1) # number=25
    # circuit end

    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how many times each measured bitstring occurs.

    (Function name kept as-is -- sic -- since callers use it.)
    """
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    prog = make_circuit()
    # NOTE(review): '1q-qvm' although the circuit addresses qubits 0-2 --
    # presumably the QVM allocates qubits on demand; confirm.
    qvm = get_qc('1q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # Transpose per-qubit measurement arrays into per-shot bitstrings.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # Write the histogram of observed bitstrings to the data file.
    writefile = open("../data/startPyquil447.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.