blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3a559961d460a100177dfb05a88dd44d35beacac
|
17993dcca87d490bc9841437309f309a5592ab38
|
/Codes/support_vector_machine/lib/svm_smo.py
|
0e73fa2184ef4d2fcb86116b01efa95cec3c0255
|
[] |
no_license
|
dreamlikexin/machine_learning
|
bc86ea15ef8552ad1be78a5bc65fb74a2cdb274e
|
850e87025270847210b6ad188d2da181983a72c7
|
refs/heads/master
| 2022-01-16T09:51:20.538340
| 2019-06-19T16:27:26
| 2019-06-19T16:27:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,937
|
py
|
import numpy as np
class SVM:
    """Linear SVM (dual form) trained with a simplified SMO algorithm."""

    def get_H(self, Lambda, i, j, y):
        """Upper clip bound for Lambda[j] when optimizing the (i, j) pair."""
        if y[i]==y[j]:
            return Lambda[i] + Lambda[j]
        else:
            return float("inf")

    def get_L(self, Lambda, i, j, y):
        """Lower clip bound for Lambda[j] when optimizing the (i, j) pair."""
        if y[i]==y[j]:
            return 0.0
        else:
            return max(0, Lambda[j] - Lambda[i])

    def smo(self, X, y, K, N):
        """Run N full sweeps of pairwise SMO updates.

        X: (m, n) samples; y: (m, 1) labels in {-1, +1}; K: (m, m) Gram
        matrix.  Stores dual variables in self.Lambda and intercept in
        self.b.
        """
        m, n = X.shape
        Lambda = np.zeros((m,1))
        epsilon = 1e-6
        # Bug fix: b must always be defined — previously, if no multiplier
        # ever exceeded epsilon, `self.b = b` raised UnboundLocalError.
        b = 0.0
        for t in range(N):
            for i in range(m):
                for j in range(m):
                    D_ij = 2 * K[i][j] - K[i][i] - K[j][j]
                    # Skip degenerate pairs (including i == j) to avoid
                    # dividing by ~0 below.
                    if abs(D_ij) < epsilon:
                        continue
                    # Prediction errors for samples i and j.
                    E_i = K[:, i].dot(Lambda * y) - y[i]
                    E_j = K[:, j].dot(Lambda * y) - y[j]
                    delta_j = 1.0 * y[j] * (E_j - E_i) / D_ij
                    # Clip Lambda[j] into [L, H] to stay feasible.
                    H_ij = self.get_H(Lambda, i, j, y)
                    L_ij = self.get_L(Lambda, i, j, y)
                    if Lambda[j] + delta_j > H_ij:
                        delta_j = H_ij - Lambda[j]
                        Lambda[j] = H_ij
                    elif Lambda[j] + delta_j < L_ij:
                        delta_j = L_ij - Lambda[j]
                        Lambda[j] = L_ij
                    else:
                        Lambda[j] += delta_j
                    # Preserve the equality constraint sum(Lambda * y).
                    delta_i = - y[i] * y[j] * delta_j
                    Lambda[i] += delta_i
                    # Recompute the intercept from a support vector.
                    if Lambda[i] > epsilon:
                        b = y[i] - K[:, i].dot(Lambda * y)
                    elif Lambda[j] > epsilon:
                        b = y[j] - K[:, j].dot(Lambda * y)
        self.Lambda = Lambda
        self.b = b

    def fit(self, X, y, N = 10):
        """Train on X, y with a linear kernel; N = number of SMO sweeps."""
        K = X.dot(X.T)
        self.smo(X, y, K, N)
        # Recover the primal weight vector from the dual solution.
        self.w = X.T.dot(self.Lambda * y)

    def predict(self, X):
        """Return predicted labels (+1/-1) for the rows of X."""
        return np.sign(X.dot(self.w) + self.b)
|
[
"wanglei@wanglei-mbp.local"
] |
wanglei@wanglei-mbp.local
|
5dc3bef4afd928a08151a42b378ae0cc9051a420
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/problems/N539_Minimum_Time_Difference.py
|
606f46d1a86602b05472d086e54f47d8e7fe6dbb
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230
| 2023-08-31T00:49:31
| 2023-08-31T00:49:31
| 143,770,000
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
class Solution(object):
    def findMinDifference(self, timePoints):
        """
        :type timePoints: List[str]
        :rtype: int
        """
        if not timePoints:
            return
        # Convert "HH:MM" to minutes past midnight and sort.
        minutes = sorted(int(tp[:2]) * 60 + int(tp[3:]) for tp in timePoints)
        best = 24 * 60
        # Smallest gap between consecutive sorted times.
        for earlier, later in zip(minutes, minutes[1:]):
            best = min(best, later - earlier)
        # Wrap-around gap between the latest and the earliest time.
        return min(best, minutes[0] + 24 * 60 - minutes[-1])
|
[
"rarry2012@gmail.com"
] |
rarry2012@gmail.com
|
91e4afe9a69b64b1252d3f879b3ec018d529bbad
|
5dd190725aaaeb7287d935b3c99c20480b208816
|
/object_detection/utils/np_mask_ops_test.py
|
a0ee46eff01f165f5ae94346b8a1b7fe2636149c
|
[
"MIT"
] |
permissive
|
DemonDamon/mask-detection-based-on-tf2odapi
|
32d947164fb54395b9e45368c0d4bcf3a6ea1c28
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
refs/heads/main
| 2023-05-13T05:05:44.534885
| 2021-06-08T05:56:09
| 2021-06-08T05:56:09
| 369,463,131
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,719
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_mask_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_mask_ops
class MaskOpsTests(tf.test.TestCase):
  """Tests for np_mask_ops on two small batches of binary 5x8 masks."""
  def setUp(self):
    # masks1 holds 2 masks (areas 8 and 10); masks2 holds 3 masks
    # (areas 8, 15 and 25).  All expected values in the tests below are
    # hand-computed from these fixtures.
    masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 0, 0, 0, 0],
                         [1, 1, 1, 1, 0, 0, 0, 0]],
                        dtype=np.uint8)
    masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0]],
                        dtype=np.uint8)
    masks1 = np.stack([masks1_0, masks1_1])
    masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 0, 0, 0, 0],
                         [1, 1, 1, 1, 0, 0, 0, 0]],
                        dtype=np.uint8)
    masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],
                         [1, 1, 1, 1, 1, 0, 0, 0],
                         [1, 1, 1, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 0]],
                        dtype=np.uint8)
    masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],
                         [1, 1, 1, 1, 1, 0, 0, 0],
                         [1, 1, 1, 1, 1, 0, 0, 0],
                         [1, 1, 1, 1, 1, 0, 0, 0],
                         [1, 1, 1, 1, 1, 0, 0, 0]],
                        dtype=np.uint8)
    masks2 = np.stack([masks2_0, masks2_1, masks2_2])
    self.masks1 = masks1
    self.masks2 = masks2
  def testArea(self):
    # Area = count of set pixels per mask.
    areas = np_mask_ops.area(self.masks1)
    expected_areas = np.array([8.0, 10.0], dtype=np.float32)
    self.assertAllClose(expected_areas, areas)
  def testIntersection(self):
    # Pairwise overlap counts: result[i][j] = |masks1[i] & masks2[j]|.
    intersection = np_mask_ops.intersection(self.masks1, self.masks2)
    expected_intersection = np.array(
        [[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]], dtype=np.float32)
    self.assertAllClose(intersection, expected_intersection)
  def testIOU(self):
    # Intersection over union, e.g. 8 / (8 + 25 - 8) = 8/25.
    iou = np_mask_ops.iou(self.masks1, self.masks2)
    expected_iou = np.array(
        [[1.0, 0.0, 8.0/25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=np.float32)
    self.assertAllClose(iou, expected_iou)
  def testIOA(self):
    # Denominators (15, 25) are the areas of masks2's masks, i.e.
    # intersection over the area of the second argument's masks.
    ioa21 = np_mask_ops.ioa(self.masks1, self.masks2)
    expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0],
                               [0.0, 9.0/15.0, 7.0/25.0]],
                              dtype=np.float32)
    self.assertAllClose(ioa21, expected_ioa21)
if __name__ == '__main__':
  tf.test.main()
|
[
"noreply@github.com"
] |
DemonDamon.noreply@github.com
|
e831296b955c4901d82f0f076e71adc6e4910c97
|
43dec81f8466acb925a9c4830fe274de9cd1f51e
|
/backend/home/migrations/0004_auto_20201124_1916.py
|
31248f325cd782e66c1f5f5fd515ded3f3aaa5f0
|
[] |
no_license
|
crowdbotics-apps/lizz-11-10-mob2-22484
|
6cd2af76c92fb2f2c9047b70017b9e0c5adfbcdb
|
61ab9254234534b8384ec2b64451cf5ba7d587de
|
refs/heads/master
| 2023-02-03T02:07:07.406375
| 2020-12-22T18:53:04
| 2020-12-22T18:53:04
| 311,748,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# Generated by Django 2.2.17 on 2020-11-24 19:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Runs after the migration that added HomePage.hello.
    dependencies = [
        ('home', '0003_homepage_hello'),
    ]
    # Drops the old `body` field and adds a new optional `body2` TextField.
    operations = [
        migrations.RemoveField(
            model_name='homepage',
            name='body',
        ),
        migrations.AddField(
            model_name='homepage',
            name='body2',
            field=models.TextField(blank=True),
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bf29aac5c207544133fb68b24af753d74d7b9796
|
a3746020cf091f433beb41bde1b62818b4de569b
|
/new_rule/ticket-rules/oracle/SQL_TO_CHANGE_TYPE.py
|
1109b2501f3b147a116f67fc69a8723297cb7134
|
[] |
no_license
|
kk71/sqlaudit
|
59bab5765a67f56f1dd2f3103812051c5acbbc49
|
747aaa02573a9c2b46a9e14415d27c0ab8e6158c
|
refs/heads/master
| 2023-02-04T18:38:46.125746
| 2020-06-05T09:49:46
| 2020-06-05T09:49:46
| 323,559,338
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
import re
def code(rule, entries, **kwargs):
    """Ticket rule: flag SQL plans whose filter predicates indicate implicit
    type conversion (SYS_OP*, TO_NUMBER or INTERNAL_FUNCTION wrappers).

    Returns (score_delta, details): minus the rule's weight plus the first
    matching plan's identifying fields, or (None, []) when nothing matches.
    """
    sql_plan_qs = kwargs["sql_plan_qs"]
    # NOTE(review): passing a compiled regex as a filter value relies on the
    # queryset backend (mongoengine-style) translating it into a pattern
    # match on filter_predicates — confirm against the ORM in use.
    plans = sql_plan_qs.filter(
        filter_predicates=re.compile(r"(SYS_OP|TO_NUMBER|INTERNAL_FUNCTION)", re.I)
    )
    # Deliberately reports only the first matching plan.
    for x in plans:
        return -rule.weight, [
            x.statement_id,
            x.plan_id,
            x.object_name,
            x.the_id,
            x.cost
        ]
    return None, []
# Register this rule implementation with the surrounding framework's hook.
code_hole.append(code)
|
[
"kai.fang@kirintech.cn"
] |
kai.fang@kirintech.cn
|
d039f7c165307c3cab9557169d0d0820f5754329
|
cb4e07b2a5dd30804ce428ec84d9e9f77709fcd5
|
/swea/D3/10570. 제곱 팰린드롬 수.py
|
9345f0450146e6efeba20eb3d88dd2e9b24a99b9
|
[] |
no_license
|
jbsam2/algo_problem
|
141c17003e88a69afdeea93a723e7f27c4626fdc
|
18f2cab5a9af2dec57b7fd6f8218badd7de822e4
|
refs/heads/master
| 2023-05-18T10:03:00.408300
| 2021-06-02T10:36:50
| 2021-06-02T10:36:50
| 282,104,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
def c(num):
    """Return 1 if num is a perfect square, else 0."""
    root = int(num ** 0.5)
    return 1 if root * root == num else 0
# For each of T test cases, count i in [a, b] such that i is a perfect
# square, i is a palindrome, and i's integer square root is a palindrome.
for t in range(int(input())):
    a,b=map(int,input().split());ans=0
    for i in range(a,b+1):
        p=int(i**(0.5))
        if c(i) and str(i)==str(i)[::-1] and str(p)==str(p)[::-1]:ans+=1
    print(f'#{t+1}',ans)
|
[
"kbsam2@gmail.com"
] |
kbsam2@gmail.com
|
5634ebaa358971f4de28704f86ff95ab91d76915
|
22b93005b05aa4cbfa6287c42e07244b9bf83be9
|
/mlflow/ml_package_versions.py
|
b5be01dffc43abf48dbe7e9ab8fc1f6dee71ebf8
|
[
"Apache-2.0"
] |
permissive
|
dbczumar/mlflow
|
63ede1f21966def17ded0da9c8e92a207b34b90d
|
e293a73b510c924cbca50b6337b6d6f9fd9f8f1b
|
refs/heads/master
| 2023-08-31T23:40:55.475707
| 2023-07-15T04:22:18
| 2023-07-15T04:22:18
| 138,797,518
| 1
| 3
|
Apache-2.0
| 2023-08-23T23:01:08
| 2018-06-26T21:51:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,987
|
py
|
# This file was auto-generated by update_ml_package_versions.py.
# Please do not edit it manually.
# Schema: flavor name -> {
#   "package_info": {"pip_release": <PyPI distribution name>},
#   "models":       {"minimum"/"maximum" tested versions for model logging},
#   "autologging":  {same bounds for autologging; absent when the flavor
#                    has no autologging support}}
_ML_PACKAGE_VERSIONS = {
    "sklearn": {
        "package_info": {
            "pip_release": "scikit-learn"
        },
        "models": {
            "minimum": "0.22.1",
            "maximum": "1.3.0"
        },
        "autologging": {
            "minimum": "0.22.1",
            "maximum": "1.3.0"
        }
    },
    "pytorch": {
        "package_info": {
            "pip_release": "torch"
        },
        "models": {
            "minimum": "1.6.0",
            "maximum": "2.0.1"
        },
        "autologging": {
            "minimum": "1.6.0",
            "maximum": "2.0.1"
        }
    },
    "pytorch-lightning": {
        "package_info": {
            "pip_release": "pytorch-lightning"
        },
        "autologging": {
            "minimum": "1.0.5",
            "maximum": "2.0.5"
        }
    },
    "tensorflow": {
        "package_info": {
            "pip_release": "tensorflow"
        },
        "models": {
            "minimum": "2.3.0",
            "maximum": "2.13.0"
        },
        "autologging": {
            "minimum": "2.3.0",
            "maximum": "2.13.0"
        }
    },
    "xgboost": {
        "package_info": {
            "pip_release": "xgboost"
        },
        "models": {
            "minimum": "1.1.1",
            "maximum": "1.7.6"
        },
        "autologging": {
            "minimum": "1.1.1",
            "maximum": "1.7.6"
        }
    },
    "lightgbm": {
        "package_info": {
            "pip_release": "lightgbm"
        },
        "models": {
            "minimum": "2.3.1",
            "maximum": "4.0.0"
        },
        "autologging": {
            "minimum": "2.3.1",
            "maximum": "4.0.0"
        }
    },
    "catboost": {
        "package_info": {
            "pip_release": "catboost"
        },
        "models": {
            "minimum": "0.23.1",
            "maximum": "1.2"
        }
    },
    "gluon": {
        "package_info": {
            "pip_release": "mxnet"
        },
        "models": {
            "minimum": "1.5.1",
            "maximum": "1.9.1"
        },
        "autologging": {
            "minimum": "1.5.1",
            "maximum": "1.9.1"
        }
    },
    "fastai": {
        "package_info": {
            "pip_release": "fastai"
        },
        "models": {
            "minimum": "2.4.1",
            "maximum": "2.7.12"
        },
        "autologging": {
            "minimum": "2.4.1",
            "maximum": "2.7.12"
        }
    },
    "onnx": {
        "package_info": {
            "pip_release": "onnx"
        },
        "models": {
            "minimum": "1.7.0",
            "maximum": "1.14.0"
        }
    },
    "spacy": {
        "package_info": {
            "pip_release": "spacy"
        },
        "models": {
            "minimum": "2.2.4",
            "maximum": "3.6.0"
        }
    },
    "statsmodels": {
        "package_info": {
            "pip_release": "statsmodels"
        },
        "models": {
            "minimum": "0.11.1",
            "maximum": "0.14.0"
        },
        "autologging": {
            "minimum": "0.11.1",
            "maximum": "0.14.0"
        }
    },
    "spark": {
        "package_info": {
            "pip_release": "pyspark"
        },
        "models": {
            "minimum": "3.0.0",
            "maximum": "3.4.1"
        },
        "autologging": {
            "minimum": "3.0.0",
            "maximum": "3.4.1"
        }
    },
    "mleap": {
        "package_info": {
            "pip_release": "mleap"
        },
        "models": {
            "minimum": "0.18.0",
            "maximum": "0.23.0"
        }
    },
    "prophet": {
        "package_info": {
            "pip_release": "prophet"
        },
        "models": {
            "minimum": "1.0.1",
            "maximum": "1.1.4"
        }
    },
    "pmdarima": {
        "package_info": {
            "pip_release": "pmdarima"
        },
        "models": {
            "minimum": "1.8.0",
            "maximum": "2.0.3"
        }
    },
    "diviner": {
        "package_info": {
            "pip_release": "diviner"
        },
        "models": {
            "minimum": "0.1.0",
            "maximum": "0.1.1"
        }
    },
    "h2o": {
        "package_info": {
            "pip_release": "h2o"
        },
        "models": {
            "minimum": "3.40.0.1",
            "maximum": "3.42.0.1"
        }
    },
    "shap": {
        "package_info": {
            "pip_release": "shap"
        },
        "models": {
            "minimum": "0.41.0",
            "maximum": "0.42.0"
        }
    },
    "paddle": {
        "package_info": {
            "pip_release": "paddlepaddle"
        },
        "models": {
            "minimum": "2.4.1",
            "maximum": "2.5.0"
        }
    },
    "transformers": {
        "package_info": {
            "pip_release": "transformers"
        },
        "models": {
            "minimum": "4.25.1",
            "maximum": "4.30.2"
        },
        "autologging": {
            "minimum": "4.25.1",
            "maximum": "4.30.2"
        }
    },
    "openai": {
        "package_info": {
            "pip_release": "openai"
        },
        "models": {
            "minimum": "0.27.2",
            "maximum": "0.27.8"
        }
    },
    "langchain": {
        "package_info": {
            "pip_release": "langchain"
        },
        "models": {
            "minimum": "0.0.169",
            "maximum": "0.0.232"
        }
    },
    "sentence_transformers": {
        "package_info": {
            "pip_release": "sentence-transformers"
        },
        "models": {
            "minimum": "2.2.2",
            "maximum": "2.2.2"
        }
    },
    "johnsnowlabs": {
        "package_info": {
            "pip_release": "johnsnowlabs"
        },
        "models": {
            "minimum": "4.4.6",
            "maximum": "5.0.0"
        }
    }
}
|
[
"noreply@github.com"
] |
dbczumar.noreply@github.com
|
615af758d32d2c1268ec81eea7298e8b8de7ef55
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/demo01/demo04-resize.py
|
7fe6931b6b3219fd8402aa1089ff29d446980c6a
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742
| 2019-11-13T08:31:57
| 2019-11-13T08:31:57
| 191,085,178
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from matplotlib import pyplot as plt
import numpy as np
import cv2
# Load the source image (path hard-coded relative to the working directory).
img = cv2.imread("book01-.jpg")
# Crop: keep rows from 1000 downward, all columns.
img_cut = img[1000:,:]
# Resize the crop to 600x600 with bicubic interpolation.
img_resize = cv2.resize(img_cut,(600,600),interpolation=cv2.INTER_CUBIC)
#cv2.imwrite("book01resize-.jpg",img_resize)
# Display original and resized images until a key is pressed.
# NOTE(review): window title "reszie" is misspelled ("resize").
cv2.imshow("origin",img)
cv2.imshow("reszie",img_resize)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"864773190@qq.com"
] |
864773190@qq.com
|
2df9bb0f75e0aa888664ef8141109604c4fb80ce
|
2f557f60fc609c03fbb42badf2c4f41ef2e60227
|
/DQMServices/Components/python/test/test_good_online_run_cfg.py
|
b7f1fc42e7ebfd7330bb6887869aa39da91b28bf
|
[
"Apache-2.0"
] |
permissive
|
CMS-TMTT/cmssw
|
91d70fc40a7110832a2ceb2dc08c15b5a299bd3b
|
80cb3a25c0d63594fe6455b837f7c3cbe3cf42d7
|
refs/heads/TMTT_1060
| 2020-03-24T07:49:39.440996
| 2020-03-04T17:21:36
| 2020-03-04T17:21:36
| 142,576,342
| 3
| 5
|
Apache-2.0
| 2019-12-05T21:16:34
| 2018-07-27T12:48:13
|
C++
|
UTF-8
|
Python
| false
| false
| 470
|
py
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration exercising the DQM file saver on a good online run.
process = cms.Process("TestDQMFileSaver")
# Load a known-good online DQM test source plus standard DQM infrastructure.
process.load("DQMServices.Components.test.test_good_online_basic_cfi")
process.load("DQMServices.Components.test.MessageLogger_cfi")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("DQMServices.Core.DQM_cfg")
# Schedule the DQM environment and the file saver.
process.dqmmodules = cms.Path(process.dqmEnv+process.dqmSaver)
# Save output files using the 'Online' naming convention.
process.dqmSaver.convention = 'Online'
process.dqmEnv.subSystemFolder = 'TestSystem'
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
9137f27ec4c8f050a41a19a14e938c78fb1cd0e9
|
3dd43ff0dab514a39f611487ab421256b3b5b13b
|
/scripts/client/gui/Scaleform/daapi/view/lobby/hangar/carousels/ranked/carousel_data_provider.py
|
048d8fce65aedbf46febbd1c5c6f3b312913163c
|
[] |
no_license
|
kusaku/wotscripts
|
04ab289e3fec134e290355ecf81cf703af189f72
|
a89c2f825d3c7dade7bc5163a6c04e7f5bab587d
|
refs/heads/master
| 2023-08-20T00:17:36.852522
| 2018-02-26T14:53:44
| 2018-02-26T14:53:44
| 80,610,354
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/hangar/carousels/ranked/carousel_data_provider.py
from gui.Scaleform.daapi.view.lobby.hangar.carousels.basic.carousel_data_provider import HangarCarouselDataProvider
from gui.Scaleform.locale.RANKED_BATTLES import RANKED_BATTLES
from gui.shared.formatters import text_styles
from gui.shared.gui_items.Vehicle import Vehicle
from gui.shared.utils.functions import makeTooltip
class RankedCarouselDataProvider(HangarCarouselDataProvider):
    """Hangar carousel provider for ranked battles: sorts vehicles that are
    unsuitable for the ranked queue last and gives them a locked tooltip."""
    @classmethod
    def _vehicleComparisonKey(cls, vehicle):
        # Prepend the "unsuitable to queue" flag (False sorts before True)
        # so unsuitable vehicles come after suitable ones; the remainder of
        # the key is inherited from the base provider.
        result = [vehicle.getCustomState() == Vehicle.VEHICLE_STATE.UNSUITABLE_TO_QUEUE]
        result.extend(super(RankedCarouselDataProvider, cls)._vehicleComparisonKey(vehicle))
        return result
    def _buildVehicle(self, vehicle):
        # Build the base entry, then attach a locked-state tooltip for
        # unsuitable vehicles while keeping them clickable.
        result = super(RankedCarouselDataProvider, self)._buildVehicle(vehicle)
        state, _ = vehicle.getState()
        if state == Vehicle.VEHICLE_STATE.UNSUITABLE_TO_QUEUE:
            result['lockedTooltip'] = makeTooltip(RANKED_BATTLES.RANKEDBATTLESCAROUSEL_LOCKEDTOOLTIP_HEADER, RANKED_BATTLES.RANKEDBATTLESCAROUSEL_LOCKEDTOOLTIP_BODY)
            result['clickEnabled'] = True
        return result
|
[
"kirill.a@aggrostudios.com"
] |
kirill.a@aggrostudios.com
|
a0adb96467c96dad6dd4c36cc41e36636bc0d50b
|
b2625b1a1ef4a3a255ae88b6d77c425727187eeb
|
/.dev_scripts/github/update_copyright.py
|
74320198598ed850edad4f2404605c54f1b4e17f
|
[
"Apache-2.0"
] |
permissive
|
wojiazaiyugang/mmpose
|
acd4083d142c5c4c2dd87e6be94a5891a42d2797
|
8947b39294b037e8272c6cf2f53ae4aa7d22193b
|
refs/heads/master
| 2023-09-01T23:45:43.857657
| 2021-11-23T03:03:02
| 2021-11-23T03:03:02
| 356,105,054
| 0
| 0
|
Apache-2.0
| 2021-09-16T06:36:44
| 2021-04-09T02:08:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import re
import sys
# Header line inserted into files, and the keywords whose presence in an
# early comment line means a file already has a license/copyright header.
HEADER = 'Copyright (c) OpenMMLab. All rights reserved.\n'
HEADER_KEYWORDS = {'Copyright', 'License'}


def contains_header(lines, comment_symbol, max_header_lines):
    """Return True if a header keyword appears in an early comment line.

    Only the first ``max_header_lines`` lines are inspected; a shebang
    line is skipped without counting as a header.
    """
    # Bug fix: escape the comment symbol — interpolating it raw broke (or
    # misbehaved) for symbols containing regex metacharacters (e.g. '* ').
    pattern = re.compile(
        f'{re.escape(comment_symbol)}.*({"|".join(HEADER_KEYWORDS)})')
    for line in lines[:max_header_lines]:
        if line.startswith('#!'):
            # skip shebang line
            continue
        elif pattern.match(line):
            return True
    return False
def parse_args():
    """Build the CLI parser and parse sys.argv.

    Returns an argparse.Namespace with `files`, `src`, `exclude`,
    `suffixes` and `max_header_lines`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'files',
        type=str,
        nargs='*',
        help='Files to add copyright header. If an empty list is given, '
        'search target files according to "--src", "--exclude" and '
        '"--suffixes"')
    parser.add_argument(
        '--src', type=str, default=None, help='Root path to search files.')
    parser.add_argument(
        '--exclude', type=str, default=None, help='Path to exclude in search.')
    parser.add_argument(
        '--suffixes',
        type=str,
        nargs='+',
        default=['.py', '.c', '.cpp', '.cu', '.sh'],
        help='Only files with one of the given suffixes will be searched.')
    parser.add_argument(
        '--max-header-lines',
        type=int,
        default=5,
        help='Only checkout copyright information in the first several lines '
        'of a file.')
    args = parser.parse_args()
    return args
def main():
    """Insert the copyright header into every target file that lacks one.

    Target files come from the CLI, or from walking --src (skipping the
    --exclude subtree).  Returns 1 when any file was modified — useful as
    a pre-commit exit status — else 0.

    Raises:
        ValueError: if a file's suffix has no known comment symbol.
    """
    args = parse_args()
    file_list = []
    if args.files:
        file_list = args.files
    else:
        # No explicit file list: collect files with matching suffixes
        # under --src, skipping anything inside --exclude.
        assert args.src is not None
        for root, _, files in os.walk(args.src):
            if args.exclude and osp.realpath(root).startswith(
                    osp.realpath(args.exclude)):
                continue
            for file in files:
                if osp.splitext(file)[1] in args.suffixes:
                    file_list.append(osp.join(root, file))
    modified = False
    for file in file_list:
        suffix = osp.splitext(file)[1]
        if suffix in {'.py', '.sh'}:
            comment_symbol = '# '
        elif suffix in {'.c', '.cpp', '.cu'}:
            comment_symbol = '// '
        else:
            raise ValueError(f'Comment symbol of files with suffix {suffix} '
                             'is unspecified.')
        with open(file, 'r') as f:
            lines = f.readlines()
        # Bug fix: the header check and rewrite must run once per file,
        # inside this loop; previously the block sat after the loop so only
        # the last collected file was ever checked and rewritten.
        if not contains_header(lines, comment_symbol, args.max_header_lines):
            # Keep a shebang on line 1; insert the header right after it.
            if lines and lines[0].startswith('#!'):
                lines.insert(1, comment_symbol + HEADER)
            else:
                lines.insert(0, comment_symbol + HEADER)
            with open(file, 'w') as f:
                f.writelines(lines)
            modified = True
    return int(modified)


if __name__ == '__main__':
    sys.exit(main())
|
[
"noreply@github.com"
] |
wojiazaiyugang.noreply@github.com
|
3dc4fcb72c535bd5635233808c4b20178c15fe20
|
c2849586a8f376cf96fcbdc1c7e5bce6522398ca
|
/ch39/interfacetracer.py
|
033591fd7329b9bc42e2527b180411b9735c1e96
|
[] |
no_license
|
freebz/Learning-Python
|
0559d7691517b4acb0228d1cc76de3e93915fb27
|
7f577edb6249f4bbcac4f590908b385192dbf308
|
refs/heads/master
| 2020-09-23T01:48:24.009383
| 2019-12-02T12:26:40
| 2019-12-02T12:26:40
| 225,371,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,033
|
py
|
def Tracer(aClass):
    """Class decorator that counts and reports attribute fetches.

    Returns a proxy class; each proxy instance builds the real *aClass*
    object, prints a trace line for every delegated attribute lookup, and
    counts those lookups in its own `fetches` attribute.
    """
    class Wrapper:
        def __init__(self, *args, **kwargs):
            # The counter lives on the proxy, so reading `fetches` is not
            # itself traced.
            self.fetches = 0
            self.wrapped = aClass(*args, **kwargs)
        def __getattr__(self, name):
            # Invoked only for names missing on the proxy — i.e. everything
            # belonging to the wrapped object.
            print('Trace: ' + name)
            self.fetches += 1
            return getattr(self.wrapped, name)
    return Wrapper
if __name__ == '__main__':
    @Tracer
    class Spam: # Spam = Tracer(Spam)
        def display(self): # Spam is rebound to Wrapper
            print('Spam!' * 8)
    @Tracer
    class Person: # Person = Tracer(Person)
        def __init__(self, name, hours, rate): # Wrapper remembers Person
            self.name = name
            self.hours = hours
            self.rate = rate
        def pay(self): # traces access from outside the class only
            return self.hours * self.rate # in-method access is not traced
    food = Spam() # runs Wrapper()
    food.display() # runs __getattr__
    print([food.fetches])
    bob = Person('Bob', 40, 50) # bob is really a Wrapper
    print(bob.name) # the Wrapper embeds a Person
    print(bob.pay())
    print('')
    sue = Person('Sue', rate=100, hours=60) # sue is a different Wrapper
    print(sue.name) # sue holds a different Person
    print(sue.pay())
    print(bob.name) # bob keeps separate state
    print(bob.pay())
    print([bob.fetches, sue.fetches]) # Wrapper attrs are not traced
|
[
"freebz@hananet.net"
] |
freebz@hananet.net
|
76884be84b73cffc74cbf5157ec534df610216e9
|
6a5ce7d885db1baa5a9d43b26f0ae623a5ef0f01
|
/azure-mgmt-web/azure/mgmt/web/models/web_site_management_client_enums.py
|
fcb642b79aa76c81394fb900d1b60c22ede03d1c
|
[
"Apache-2.0"
] |
permissive
|
JammyBrand82/azure-sdk-for-python
|
333af194ff9143ec77f49203a5a71f15c399f278
|
c65e189cd41bd3464556b17bfcdee1303867996c
|
refs/heads/master
| 2021-01-17T18:31:10.661151
| 2016-03-17T21:03:08
| 2016-03-17T21:03:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,118
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
# Auto-generated enums: member values mirror the string constants used on
# the wire by the Azure Web Site Management REST API; Python member names
# are snake_case versions of those values.
class DomainStatus(Enum):
    active = "Active"
    awaiting = "Awaiting"
    cancelled = "Cancelled"
    confiscated = "Confiscated"
    disabled = "Disabled"
    excluded = "Excluded"
    expired = "Expired"
    failed = "Failed"
    held = "Held"
    locked = "Locked"
    parked = "Parked"
    pending = "Pending"
    reserved = "Reserved"
    reverted = "Reverted"
    suspended = "Suspended"
    transferred = "Transferred"
    unknown = "Unknown"
    unlocked = "Unlocked"
    unparked = "Unparked"
    updated = "Updated"
    json_converter_failed = "JsonConverterFailed"
class ProvisioningState(Enum):
    succeeded = "Succeeded"
    failed = "Failed"
    canceled = "Canceled"
    in_progress = "InProgress"
    deleting = "Deleting"
class AzureResourceType(Enum):
    website = "Website"
    traffic_manager = "TrafficManager"
class CustomHostNameDnsRecordType(Enum):
    cname = "CName"
    a = "A"
class HostNameType(Enum):
    verified = "Verified"
    managed = "Managed"
class StatusOptions(Enum):
    ready = "Ready"
    pending = "Pending"
class UsageState(Enum):
    normal = "Normal"
    exceeded = "Exceeded"
class SiteAvailabilityState(Enum):
    normal = "Normal"
    limited = "Limited"
    disaster_recovery_mode = "DisasterRecoveryMode"
class SslState(Enum):
    disabled = "Disabled"
    sni_enabled = "SniEnabled"
    ip_based_enabled = "IpBasedEnabled"
class DatabaseServerType(Enum):
    my_sql = "MySql"
    sql_server = "SQLServer"
    sql_azure = "SQLAzure"
    custom = "Custom"
class ManagedPipelineMode(Enum):
    integrated = "Integrated"
    classic = "Classic"
class SiteLoadBalancing(Enum):
    weighted_round_robin = "WeightedRoundRobin"
    least_requests = "LeastRequests"
    least_response_time = "LeastResponseTime"
    weighted_total_traffic = "WeightedTotalTraffic"
    request_hash = "RequestHash"
class AutoHealActionType(Enum):
    recycle = "Recycle"
    log_event = "LogEvent"
    custom_action = "CustomAction"
class UnauthenticatedClientAction(Enum):
    redirect_to_login_page = "RedirectToLoginPage"
    allow_anonymous = "AllowAnonymous"
class BuiltInAuthenticationProvider(Enum):
    azure_active_directory = "AzureActiveDirectory"
    facebook = "Facebook"
    google = "Google"
    microsoft_account = "MicrosoftAccount"
    twitter = "Twitter"
class HostingEnvironmentStatus(Enum):
    preparing = "Preparing"
    ready = "Ready"
    scaling = "Scaling"
    deleting = "Deleting"
class InternalLoadBalancingMode(Enum):
    none = "None"
    web = "Web"
    publishing = "Publishing"
class ComputeModeOptions(Enum):
    shared = "Shared"
    dedicated = "Dedicated"
class WorkerSizeOptions(Enum):
    default = "Default"
    small = "Small"
    medium = "Medium"
    large = "Large"
class AccessControlEntryAction(Enum):
    permit = "Permit"
    deny = "Deny"
class ManagedHostingEnvironmentStatus(Enum):
    preparing = "Preparing"
    ready = "Ready"
    deleting = "Deleting"
class DomainType(Enum):
    regular = "Regular"
    soft_deleted = "SoftDeleted"
class NotificationLevel(Enum):
    critical = "Critical"
    warning = "Warning"
    information = "Information"
    non_urgent_suggestion = "NonUrgentSuggestion"
class Channels(Enum):
    notification = "Notification"
    api = "Api"
    email = "Email"
    all = "All"
class CloneAbilityResult(Enum):
    cloneable = "Cloneable"
    partially_cloneable = "PartiallyCloneable"
    not_cloneable = "NotCloneable"
class LogLevel(Enum):
    off = "Off"
    verbose = "Verbose"
    information = "Information"
    warning = "Warning"
    error = "Error"
class FrequencyUnit(Enum):
    day = "Day"
    hour = "Hour"
class BackupRestoreOperationType(Enum):
    default = "Default"
    clone = "Clone"
    relocation = "Relocation"
class BackupItemStatus(Enum):
    in_progress = "InProgress"
    failed = "Failed"
    succeeded = "Succeeded"
    timed_out = "TimedOut"
    created = "Created"
    skipped = "Skipped"
    partially_succeeded = "PartiallySucceeded"
    delete_in_progress = "DeleteInProgress"
    delete_failed = "DeleteFailed"
    deleted = "Deleted"
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
8d0a0b0dec63a2130b9ada6f938cfbddbaeeb4a8
|
10920b11a22a20f9a7f63157818327f3c4e41888
|
/jibby_opencv/Object Recognition/two.py
|
10e258ca8fedf5c5342ee07ae449937c19975227
|
[] |
no_license
|
dsall/computerv
|
e331b3d025c8cec0119b789107d1fef18d08f02a
|
40671d618c31ad9d9b20fc902a218a8e281098bc
|
refs/heads/master
| 2021-09-15T09:33:08.495580
| 2018-05-29T23:41:42
| 2018-05-29T23:41:42
| 135,363,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 00:23:15 2018
@author: djibrilsall
"""
import numpy as np
import cv2
# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
# Haar cascades must be present in the working directory.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# NOTE(review): camera index 1 assumes a second capture device — confirm.
cap = cv2.VideoCapture(1)
while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect faces; then search for eyes only inside each face region.
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        # Blue rectangle around the face.
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex,ey,ew,eh) in eyes:
            # Green rectangle around each eye.
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
    cv2.imshow('img',img)
    # Exit on ESC (key code 27).
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
[
"djiby45@outlook.com"
] |
djiby45@outlook.com
|
f01c9f4cb6e578f40f952d27a77dbdab38e9b181
|
0b5ab7349485da4ea40ca343bc50f4cab74c917c
|
/week09/tutorial/snippets/urls.py
|
2dcc6decba1a0c26454d9a83c0f3e37011525c86
|
[] |
no_license
|
workherd/Python006-006
|
9bf2782ccda037de9af98eb7daa87fd1edeb3caf
|
7aa176c3cf4effd015802b550edfb70f859e94d9
|
refs/heads/main
| 2023-04-29T14:37:43.545376
| 2021-05-16T04:13:08
| 2021-05-16T04:13:08
| 323,247,475
| 1
| 0
| null | 2020-12-21T06:13:42
| 2020-12-21T06:13:42
| null |
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from snippets import views
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
# The API URLs are now determined automatically by the router.
urlpatterns = [
    path('', include(router.urls)),
    # Extra function-based view kept outside the router.
    path('aaa/', views.api_root01),
]
# from rest_framework.urlpatterns import format_suffix_patterns
# from snippets.views import SnippetViewSet, UserViewSet, api_root
# from rest_framework import renderers
# snippet_list = SnippetViewSet.as_view({
# 'get': 'list',
# 'post': 'create'
# })
# snippet_detail = SnippetViewSet.as_view({
# 'get': 'retrieve',
# 'put': 'update',
# 'patch': 'partial_update',
# 'delete': 'destroy'
# })
# snippet_highlight = SnippetViewSet.as_view({
# 'get': 'highlight'
# }, renderer_classes=[renderers.StaticHTMLRenderer])
# user_list = UserViewSet.as_view({
# 'get': 'list'
# })
# user_detail = UserViewSet.as_view({
# 'get': 'retrieve'
# })
# urlpatterns = [
# path('', views.api_root),
# path('snippets/',
# views.SnippetList.as_view(),
# name='snippet-list'),
# path('snippets/<int:pk>/',
# views.SnippetDetail.as_view(),
# name='snippet-detail'),
# path('snippets/<int:pk>/highlight/',
# views.SnippetHighlight.as_view(),
# name='snippet-highlight'),
# path('users/',
# views.UserList.as_view(),
# name='user-list'),
# path('users/<int:pk>/',
# views.UserDetail.as_view(),
# name='user-detail')
# ]
# # 能够处理诸如http://example.com/api/items/4.json之类的URL
# urlpatterns = format_suffix_patterns(urlpatterns)
# urlpatterns = format_suffix_patterns([
# path('', api_root),
# path('snippets/', snippet_list, name='snippet-list'),
# path('snippets/<int:pk>/', snippet_detail, name='snippet-detail'),
# path('snippets/<int:pk>/highlight/', snippet_highlight, name='snippet-highlight'),
# path('users/', user_list, name='user-list'),
# path('users/<int:pk>/', user_detail, name='user-detail')
# ])
|
[
"1330430077@qq.com"
] |
1330430077@qq.com
|
27686a5573582bba05fe1c037ddb797cb55f040b
|
3af6960c805e9903eb27c09d8bc7ebc77f5928fe
|
/problems/0216_Combination_Sum_III/solution.py
|
e332183e542f99042b9b4fe0e2ddce1af912459b
|
[] |
no_license
|
romain-li/leetcode
|
b3c8d9d4473eebd039af16ad2d4d99abc2768bdd
|
5e82b69bd041c2c168d75cb9179a8cbd7bf0173e
|
refs/heads/master
| 2020-06-04T20:05:03.592558
| 2015-06-08T18:05:03
| 2015-06-08T18:05:03
| 27,431,664
| 2
| 1
| null | 2015-06-08T18:05:04
| 2014-12-02T12:31:58
|
Python
|
UTF-8
|
Python
| false
| false
| 139
|
py
|
class Solution:
# @param {integer} k
# @param {integer} n
# @return {integer[][]}
def combinationSum3(self, k, n):
|
[
"romain_li@163.com"
] |
romain_li@163.com
|
0ef3a3afbb0a5ad1f607d1f4fa56b8207a2c978d
|
857a9e588a04b40a66b6ca115063cb67ef0427ea
|
/timemachines/skaters/glu/glusimple.py
|
45f7a1894d2d9f43387ba47e6408b68c40771bb8
|
[
"MIT"
] |
permissive
|
rambam613/timemachines
|
81b88357498871f77efed0faf9c25b4c408d822c
|
cd243d4606b4ad9c1d419988fc6c04b0964af2e6
|
refs/heads/main
| 2023-07-03T07:06:24.421114
| 2021-08-07T17:42:40
| 2021-08-07T17:42:40
| 393,793,785
| 1
| 0
|
MIT
| 2021-08-07T21:13:35
| 2021-08-07T21:13:34
| null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
from timemachines.skatertools.utilities.conventions import Y_TYPE, A_TYPE, R_TYPE, E_TYPE, T_TYPE, wrap
from typing import Any
from timemachines.skatertools.components.parade import parade
from timemachines.skatertools.utilities.nonemath import nonecast
from timemachines.skatertools.ensembling.ensemblefactory import precision_weighted_ensemble_factory
def glu_simple(y :Y_TYPE, s, k:int, a:A_TYPE =None, t:T_TYPE =None, e:E_TYPE =None, r:R_TYPE=None):
""" Rolling gluon
"""
assert r is not None
y0 = wrap(y)[0]
if not s.get('p'):
s = {'p':{},
'x':y0,
'rho':r}
assert 0 <= s['rho'] <= 1, 'Expecting rho=r to be between 0 and 1'
else:
assert abs(r-s['rho'])<1e-6,'rho=r is immutable'
if y0 is None:
return None, s, None
else:
s['x'] = s['rho']*s['x'] + (1-s['rho'])*y0 # Make me better !
x = [s['x']]*k
_we_ignore_bias, x_std, s['p'] = parade(p=s['p'], x=x, y=y0)
x_std_fallback = nonecast(x_std,fill_value=1.0)
return [s['x']] * k, x_std_fallback, s
|
[
"peter.cotton@microprediction.com"
] |
peter.cotton@microprediction.com
|
4febe8ed7ba4ae0ec44e162ddac26a0a35201331
|
e73761fd861010f4dd2e2be09507d86bd905f4f5
|
/scud/main/migrations/0002_sessionstouser.py
|
78e5450bb8e12f64aa092230e4fccdbb5aac8446
|
[] |
no_license
|
BakdauletBolatE/rfid-system
|
a57ca2fbb3518f9df6683bf899be1d9455e55c2a
|
24f023cc801dc0d24dedb4e7ecd27091c439c068
|
refs/heads/main
| 2023-03-13T05:14:37.043832
| 2021-02-22T06:12:31
| 2021-02-22T06:12:31
| 341,097,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
# Generated by Django 3.1.6 on 2021-02-18 17:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SessionsToUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Member_ID', models.CharField(max_length=255, verbose_name='Мембер ID')),
('allowed_members', models.BooleanField(default=False, verbose_name='Авторизован ли')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='main.users')),
],
),
]
|
[
"bakosh21345@gmail.com"
] |
bakosh21345@gmail.com
|
de41905ee008a0a8004b2f583f9b16d0ab569823
|
95e9ec4b3b0d86063da53a0e62e138cf794cce3a
|
/python/Django/20190523/test01/test01/settings.py
|
199c6f60e7fba053de9340c0fb1a68759f7ad78a
|
[] |
no_license
|
wjl626nice/1902
|
c3d350d91925a01628c9402cbceb32ebf812e43c
|
5a1a6dd59cdd903563389fa7c73a283e8657d731
|
refs/heads/master
| 2023-01-05T23:51:47.667675
| 2019-08-19T06:42:09
| 2019-08-19T06:42:09
| 180,686,044
| 4
| 1
| null | 2023-01-04T07:35:24
| 2019-04-11T00:46:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,057
|
py
|
"""
Django settings for test01 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# 项目根目录
# __file__ 文件的路径
# os.path.abspath 获取文件的绝对路径
# os.path.dirname 获取文件所在的目录
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BASE_DIR = /Users/qingyun/1902/python/Django/20190523/test01
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wv+64-c5u)4iyx)v3jl*ix&3j=gu1+*&)djfido(7^nus$l21n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# 路由 uri 和函数对应的模块
ROOT_URLCONF = 'test01.urls'
# 设置模板配置
TEMPLATES = [
{
# 模板引擎,jinja2
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 配置模板的位置 os.path.join 可以让咱们写的程序跨平台,不同操作系统目录分隔符不一样。
'DIRS': [os.path.join(BASE_DIR, 'templates')]
# window 目录:C:\\abc\\ab\\aa
# linux 目录:/abc/ab/aa
# os.path.join(BASE_DIR, 'templates') 结果:/Users/qingyun/1902/python/Django/20190523/test01/templates
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# 数据库的配置
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
# 设置中国的时区 PRC 中华人民共和国
TIME_ZONE = 'PRC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# css js image等静态文件保存的路径 别名。相当于STATICFILES_DIRS的别名
STATIC_URL = '/static/'
# Django项目中所有的css js image 都会从该配置目录中查找对应文件
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'statics')
]
|
[
"18537160262@qq.com"
] |
18537160262@qq.com
|
a4f1eb5a73bb211daab14e6aac02273d7a07b9c7
|
093b9569be9d1c4e5daf92efbebc38f680917b2d
|
/.history/base/models_20210829142734.py
|
4c98bfd01c1d244a8078fbe1666e967e01960773
|
[] |
no_license
|
Justin-Panagos/todoList
|
95b1e97ff71af1b0be58e7f8937d726a687cea4d
|
10539219b59fcea00f8b19a406db3d4c3f4d289e
|
refs/heads/master
| 2023-08-04T13:27:13.309769
| 2021-08-29T14:06:43
| 2021-08-29T14:06:43
| 400,827,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, null = True, blank=True)
STATUS= [(,'P1'),(1,'P2'),(2,'P3'),]
priority = models.CharField(max_length=5, choices=STATUS,default='',)
title = models.Te(max_length=200)
description = models.TextField(null=True, blank=True)
duedate = models.DateField(null=True, blank=True)
complete = models.BooleanField(default=False)
create =models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
ordering =['complete']
|
[
"justpanagos@gmail.com"
] |
justpanagos@gmail.com
|
aca6239bb1902abcf69c2fa4ab44b62d104cc3ee
|
78f43f8bd07ae0fc91738a63cd7bbca08ae26066
|
/leetcode/interval/least_interval.py
|
b5418240bed39df4bf2a1b61baecf70e18b8cf95
|
[] |
no_license
|
hanrick2000/LeetcodePy
|
2f3a841f696005e8f0bf4cd33fe586f97173731f
|
b24fb0e7403606127d26f91ff86ddf8d2b071318
|
refs/heads/master
| 2022-04-14T01:34:05.044542
| 2020-04-12T06:11:29
| 2020-04-12T06:11:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
class Solution(object):
def leastInterval(self, tasks, n):
"""
:type tasks: List[str]
:type n: int
:rtype: int
"""
LETTER_NUM = 26
freqs = [0] * LETTER_NUM
for c in tasks:
freqs[ord(c) - ord('A')] += 1
freqs.sort() # in-place sort
ret = 0
while freqs[-1] > 0:
i = 0
while i <= n:
if freqs[-1] == 0:
break
# use most frequently appeared letter by turn
if i < LETTER_NUM and freqs[LETTER_NUM - i - 1] > 0:
freqs[LETTER_NUM - i - 1] -= 1
ret += 1
i += 1
freqs.sort()
return ret
|
[
"dofu@ebay.com"
] |
dofu@ebay.com
|
434f2a4e5d4c626ba4768123e191fc6823872f15
|
c9f1cc3a6715917d658a6e525b7c2d35b0380f9f
|
/Non_canonical_introns/Analisys/Join_final_tables_seq.py
|
cf7f3dde6d7fa321b0e71a9c47f123d3081f1b1d
|
[] |
no_license
|
geparada/my_src
|
4f84887130b985e84aad3d0d35e85911087d9b4f
|
8d64f7ef51e1f74303ca88beb0ee964f546d8301
|
refs/heads/master
| 2021-01-17T01:50:50.414690
| 2017-03-14T10:01:50
| 2017-03-14T10:01:50
| 20,638,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,188
|
py
|
import sys
import csv
def main(hg19, SJ_hg19, hg19_reads_seq_tags, GM12878, SJ_GM12878_paternal, SJ_GM12878_maternal, GM12878_reads_seq_tags_paternal, GM12878_reads_seq_tags_maternal, TOTAL_final_table):
csv.field_size_limit(1000000000)
reader1 = csv.reader(open(hg19), delimiter = ' ')
reader2 = csv.reader(open(SJ_hg19), delimiter = ' ')
reader3 = csv.reader(open(hg19_reads_seq_tags), delimiter = ' ')
reader4 = csv.reader(open(GM12878), delimiter = ' ')
reader5 = csv.reader(open(SJ_GM12878_paternal), delimiter = ' ')
reader6 = csv.reader(open(SJ_GM12878_maternal), delimiter = ' ')
reader7 = csv.reader(open(GM12878_reads_seq_tags_paternal), delimiter = ' ')
reader8 = csv.reader(open(GM12878_reads_seq_tags_maternal), delimiter = ' ')
reader9 = csv.reader(open(TOTAL_final_table), delimiter = ' ')
hg19_intron_reads = {}
GM12878_intron_reads = {}
reads_seq = {}
for row in reader1:
intron = row[0]
read = row[10].split(",")[:5]
hg19_intron_reads[intron] = read
for row in reader2:
read = row[0]
dn = row[7]
seq = row[14]
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
reads_seq[read] = seq
for row in reader3:
read = row[0]
seq = row[1]
reads_seq[read] = seq
for row in reader4:
intron = row[0]
read = row[10].split(",")[:5]
GM12878_intron_reads[intron] = read
for row in reader5:
read = row[0]
dn = row[7]
seq = row[14]
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
reads_seq[read] = seq
for row in reader6:
read = row[0]
dn = row[7]
seq = row[14]
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
reads_seq[read] = seq
for row in reader7:
read = row[0]
seq = row[1]
reads_seq[read] = seq
for row in reader8:
read = row[0]
seq = row[1]
reads_seq[read] = seq
for row in reader9:
intron = row[0]
dn = row[6]
hg19 = int(row[9])
GM12878 = int(row[10])
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
seqs_hg19 = []
seqs_GM12878 = []
try:
reads_hg19 = hg19_intron_reads[intron]
for read in reads_hg19:
seq = reads_seq[read]
seqs_hg19.append(seq)
except KeyError:
pass
try:
reads_GM12878 = GM12878_intron_reads[intron]
for read in reads_GM12878:
seq = reads_seq[read]
seqs_GM12878.append(seq)
except KeyError:
pass
if seqs_hg19 == []:
seqs_hg19 = "0"
if seqs_GM12878 == []:
seqs_GM12878 = "0"
print " ".join(row), ",".join(seqs_hg19), ",".join(seqs_GM12878)
#python ~/my_src/Analisys/Join_final_tables_seq.py ../hg19/ALL/introns.final_table.hg19.fixed.tags ../hg19/ALL/SJ.introns.blat1.TOTAL tags/hg19/TOTAL.tags.filter.final ../GM12878/NA12878_Joel_Rozowsky/STRANDED/TOTAL/introns.final_table.hg19.fixed.tags ../GM12878/NA12878_Joel_Rozowsky/STRANDED/TOTAL_paternal/ALL/SJ.introns.blat1.TOTAL ../GM12878/NA12878_Joel_Rozowsky/STRANDED/TOTAL_maternal/ALL/SJ.introns.blat1.TOTAL tags/GM12878/paternal/TOTAL.tags.filter.final tags/GM12878/maternal/TOTAL.tags.filter.final TOTAL_introns.final_table.tags
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9])
|
[
"geparada@omics.(none)"
] |
geparada@omics.(none)
|
3fdea1ed28d82a773de42cb6859f5ce3fa0ceefd
|
aab2f6f5f673bf16424d592142ba3af414423adb
|
/kafkaconsumer.py
|
5b972353177ac3b7eef951403c753ffb888eaab2
|
[] |
no_license
|
ashishjsharda/KafkaUsingPython
|
75f46ba4df25f264e853615c8fde73ed59aa620e
|
cbd9aff94b9d896736e14befcb42dfa64efc1562
|
refs/heads/master
| 2020-12-19T11:07:08.891269
| 2020-01-23T03:22:39
| 2020-01-23T03:22:39
| 235,715,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
'''
Created on Jan 22, 2020
@author: ashish
'''
from kafka import KafkaConsumer
consumer=KafkaConsumer('sample')
for message in consumer:
print(message)
|
[
"noreply@github.com"
] |
ashishjsharda.noreply@github.com
|
0d7c3cc6491bf482131fbbb00556b7368044d75d
|
391d648132c1a05e7da575205eef89a7208a892a
|
/compare.py
|
311165b27e102d591a5f36b47d2bd374e0a1c43b
|
[] |
no_license
|
michaelbateman/DemographicInference
|
c3ceaf69f8b554f3973473607d6b5201cca423f9
|
b1e2529b1ce0710f82d2867d08588ae4a6c72bb2
|
refs/heads/master
| 2021-01-10T06:28:43.698581
| 2015-10-06T18:25:45
| 2015-10-06T18:25:45
| 43,770,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
import sys
import matplotlib.pyplot as plt
import numpy as np
import subprocess
import time
import pylab
from scipy import stats
num_haps = int(sys.argv[1])
num_times = int(sys.argv[2])
def readfire(s): # reads the output of a fire file and
# returns the time vector and the population vector
# input file should have two columns: first column is time in generations
# second column is population
time = []
pop = []
with open(s, 'r') as input_file:
throwaway = input_file.readline()
while throwaway.strip() != 'START HERE':
throwaway = input_file.readline()
for line in input_file:
temp = line.strip()
L = temp.split()
if 'START' in temp:
print 'START'
time = []
pop = []
elif 'f' in temp:
print temp
elif len(L) >= 2:
#print 'no'
#print temp
temp = line.strip()
a,b = temp.split()
time.append(float(a))
pop.append(float(b))
#with open(s, 'r') as input_file:
#throwaway = input_file.readline()
#while throwaway.strip() != 'START HERE':
#throwaway = input_file.readline()
#for line in input_file:
#print 'hello'
#temp = line.strip()
#a,b = temp.split()
#time.append(float(a))
#pop.append(float(b))
#print a, b
print 'readfire is done'
return [time, pop]
pop_list = ['GBR', 'CEU','YRI', 'FIN', 'PEL', 'ESN']
IC_list = ['FIN']
for pop in pop_list:
for start in IC_list:
root = pop + '.' + str(num_haps/2) + '.composite.max.binned'
t = root + '.fire'
print pop, 'starting from flat 50000 ',
[T, P] = readfire(t)
#print t
#t = pop + '.fire'
ic_file = start + '.ic.txt'
t = root +'.'+ ic_file + '.fire'
print pop, 'starting from', start
[T, P] = readfire(t)
#plt.plot(np.multiply(28,T) ,P, '-o', label = pop )
#plt.yscale('log')
#plt.xlabel('years')
#plt.title('re-started from ' + start +' curve')
#fig = plt.figure()
#fig.set_yscale('log')
#plt.legend(loc = 'lower right')
#title = 'ic.' + start + '.' +str(num_haps/2)
#for pop in pop_list:
#title+= '.' + pop
#pylab.savefig(title + '.png', bbox_inches='tight')
#plt.show()
|
[
"bbeeefcake@gmail.com"
] |
bbeeefcake@gmail.com
|
c8ef2e7798953aa2231f6ae172d6891809e33e43
|
c155d27bf74255b8315603518c5ab76d0638dfea
|
/uv/serpens/profiles/3sigma_cn_3.py
|
f6f8c1ebbcce36349089a536d0cded0167ff74b4
|
[] |
no_license
|
amirocha/doktorat
|
406c2e4476a5a22c863c37eb5f581a369800e936
|
22c90228f6bca9d0b116c73457b7e86ae4462167
|
refs/heads/master
| 2021-07-05T14:18:26.324886
| 2020-08-03T19:28:50
| 2020-08-03T19:28:50
| 152,316,686
| 0
| 0
| null | 2020-04-03T20:22:37
| 2018-10-09T20:28:29
|
Python
|
UTF-8
|
Python
| false
| false
| 7,857
|
py
|
#-*-coding: utf-8-*-
'''
DESCRIPTION: This script displays the averaged and resampled spectrum (0.5 km/s)
DESCRIPTION: for a given region on map and plots 3*RMS and 1*RMS levels
DESCRIPTION: and shows X ranges for flux calculation
The averaged region is consistent with HCN 1-0 beam size after convolution (27.8"),
because it's the biggest for this molecule. We used the same beam size for other molecules.
'''
#!/usr/bin/python3.5
# name the output file
psname = 'smm8_cn.eps'
# import packages
from numpy import *
from pylab import *
import matplotlib.pyplot as plt
from matplotlib import *
import pandas as pd
# ------------------------------------------------
# ------------------------------------------------
# find the x ranges (in km/s), which are above 3RMS
# level - for flux integration of line
rms = 0.122 # rms taken from CLASS
rms_3 = 3*rms
rms_2 = 2*rms
name = 'serpens_cn10_smm8.txt'
# read the spectrum
spec_df = pd.read_table(name, delim_whitespace=True, header=None)
### 3 SIGMA ### 3 SIGMA ### 3 SIGMA ### 3 SIGMA ### 3 SIGMA ###
# left (x1) and right (x2) ranges in which we are looking for minima
x1_ran_df = spec_df[(spec_df[0] > -20) & (spec_df[0] < -15)] #change ranges!!
x2_ran_df = spec_df[(spec_df[0] > -15) & (spec_df[0] < -10)]
#change ranges!!
# SERPENS HCN10: -5.0 - -0.0 and 10. - 20.
# for both X ranges take the column with flux and calculate abs(yi - 3rms)
y1_i_rms_3 = (x1_ran_df[1]-rms_3).abs()
y2_i_rms_3 = (x2_ran_df[1]-rms_3).abs()
# join two dataframes, reset and drop old index
# then change the names of column indexes from 011 to 123
final1_df = pd.concat([x1_ran_df, y1_i_rms_3], axis = 1).reset_index(drop=True)
final1_df.columns = [1,2,3]
final2_df = pd.concat([x2_ran_df, y2_i_rms_3], axis = 1).reset_index(drop=True)
final2_df.columns = [1,2,3]
# find the index of item which contains row with the minimum
min1 = final1_df[3].idxmin(axis=1, skipna=True)
min2 = final2_df[3].idxmin(axis=1, skipna=True)
# print the x value of minimum (in km/s)
print ('X1 (3s) =', final1_df[1].ix[min1].round(1))
print ('X2 (3s) =', final2_df[1].ix[min2].round(1))
# ------------------------------------------------
# ------------------------------------------------
### 1 SIGMA ### 1 SIGMA ### 1 SIGMA ### 1 SIGMA ### 1 SIGMA ###
# left (x3) and right (x4) ranges in which we are looking for minima
x3_ran_df = spec_df[(spec_df[0] > 5.0) & (spec_df[0] < 8.0)] #change ranges!!
x4_ran_df = spec_df[(spec_df[0] > 8.0) & (spec_df[0] < 12.1)]
#change ranges!!
# NGC1333 HCN10: -30.0 - -20.0 and -5. - -1.
# for both X ranges take the column with flux and calculate abs(yi - 3rms)
y3_i_rms = (x3_ran_df[1]-rms_2).abs()
y4_i_rms = (x4_ran_df[1]-rms_2).abs()
# join two dataframes, reset and drop old index
# then change the names of column indexes from 011 to 123
final3_df = pd.concat([x3_ran_df, y3_i_rms], axis = 1).reset_index(drop=True)
final3_df.columns = [1,2,3]
final4_df = pd.concat([x4_ran_df, y4_i_rms], axis = 1).reset_index(drop=True)
final4_df.columns = [1,2,3]
# find the index of item which contains row with the minimum
min3 = final3_df[3].idxmin(axis=1, skipna=True)
min4 = final4_df[3].idxmin(axis=1, skipna=True)
# print the x value of minimum (in km/s)
print ('X3 (2s) =', final3_df[1].ix[min3].round(1))
print ('X4 (2s) =', final4_df[1].ix[min4].round(1))
# ------------------------------------------------
# ------------------------------------------------
# ------------------------------------------------
# NOW PLOT THE SPEC WITH 3*RMS LEVEL AND X RANGES #
# ------------------------------------------------
fig = plt.figure(figsize = (9,7), dpi = 400)
#plt.rcParams["font.family"] = "Times New Roman"
rc('font', **{'family':'serif', 'serif':['Times New Roman']})
params = {'backend': 'pdf',
#'axes.labelsize': 12,
#'text.fontsize': 12,
#'legend.fontsize': 12,
#'xtick.labelsize': 7,
#'ytick.labelsize': 7,
# The comm. below determines whether you use LaTeX
# for all text in matplotlib (you probably don't want
# to turn this on, but may)
'text.usetex': False,
# four comm. below (math) determines what is used for math rendering
'mathtext.rm': 'serif',
'mathtext.it': 'serif:italic',
'mathtext.bf': 'serif:bold',
'mathtext.fontset': 'custom',
#'figure.figsize': fig_size,
'axes.unicode_minus': True}
matplotlib.rcParams.update(params)
""" READ INPUT DATA
########## SERPENS, HCN 1-0, center of ave.: 163.5 -142.7, range: 149.6 177.4 -156.6 -128.8 ##########
"""
v_hcn10, Tmb_hcn10 = loadtxt(name, usecols=(0, 1), unpack=True, skiprows=1)
ax = fig.add_subplot(111)
"""
CREATE A PLOT
"""
ax.set_xlabel(r'$\mathrm{V_{LSR}\;[km/s]}$', fontsize = 9)
ax.set_ylabel(r'$\mathrm{T_{MB}\;[K]}$', fontsize = 9)
# major x ticks every 20, minor ticks every 10
# major y ticks every 1, minor ticks every 0.5
major_ticks_x = np.arange(-80, 50, 5)
minor_ticks_x = np.arange(-80, 50, 1)
major_ticks_y = np.arange(0.0, 1.2, 0.2)
minor_ticks_y = np.arange(0.0, 1.2, 0.1)
ax.set_xticks(major_ticks_x)
ax.set_xticks(minor_ticks_x, minor=True)
ax.set_yticks(major_ticks_y)
ax.set_yticks(minor_ticks_y, minor=True)
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# label.set_fontname('Arial')
label.set_fontsize(7)
"""
########## SERPENS, HCN 1-0, center of ave.: 163.5 -142.7, range: 149.6 177.4 -156.6 -128.8 ##########
"""
ax.plot(v_hcn10, Tmb_hcn10, color = 'black', linewidth=1.0, linestyle = '-')
plt.axhline(y=rms_3, xmin = -60.0, xmax = 40.0, color = 'red', linewidth=1.5, linestyle = '-')
plt.axhline(y=rms, xmin = -60.0, xmax = 40.0, color = 'green', linewidth=1.5, linestyle = '-')
# THE ANNOTATIONS ON A GRAPH
#---------------------------
# alpha - transparency, fc - a color of inner part of arrow, ec - a color of an edge of arrow
# headwidth - the size of arrow, frac - a lenght of the head of arrow
# shrink - fraction of total length to ‘shrink’ from both ends
#ax.annotate(r'$\mathrm{RMS\;=%.5f\;K;3*RMS\;=%.3f\;K}$'%(rms,rms_3), fontsize=10, xy=(-38.0, 1.13), textcoords='data')
#ax.annotate(r'$\mathrm{set\;window\;-30\;40}$', fontsize=10, xy=(-38.0, 1.1), textcoords='data')
#ax.annotate(r'$\mathrm{X_{1}\;(3s)\;=%.1f \;km/s}$'%(final1_df[1].ix[min1].round(1)), fontsize=10, xy=(-38.0, 1.07), textcoords='data')
#ax.annotate(r'$\mathrm{X_{2}\;(3s)\;=%.1f \;km/s}$'%(final2_df[1].ix[min2].round(1)), fontsize=10, xy=(-38.0, 1.04), textcoords='data')
#ax.annotate(r'$\mathrm{X_{3}\;(1s)\;=%.1f \;km/s}$'%(final3_df[1].ix[min3].round(1)), fontsize=10, xy=(-38.0, 1.01), textcoords='data')
#ax.annotate(r'$\mathrm{X_{4}\;(1s)\;=%.1f \;km/s}$'%(final4_df[1].ix[min4].round(1)), fontsize=10, xy=(-38.0, 0.98), textcoords='data')
# plot the vertical lines for x = min1 and x = min2
plt.axvline(x=final1_df[1].ix[min1].round(1), color='red', linestyle='--')
plt.axvline(x=final2_df[1].ix[min2].round(1), color='red', linestyle='--')
# plot the vertical lines for x = min3 and x = min4
plt.axvline(x=final3_df[1].ix[min3].round(1), color='green', linestyle='--')
plt.axvline(x=final4_df[1].ix[min4].round(1), color='green', linestyle='--')
# the upper and lower axis limits on a LEFT GRAPH
ax.set_xlim([-80.0, 30.0])
ax.set_ylim([-0.1, 1.2])
# close and save file
savefig(psname, format = 'eps', bbox_inches = 'tight')
clf()
|
[
"mirochagnieszka@gmail.com"
] |
mirochagnieszka@gmail.com
|
31012c36efe1adf3e32c6e9600220d6f672511ec
|
027f52cbbd4e9ccd52b73dcf9ed523137ec78815
|
/python_language/Day_Base_Code/Day_07/lambda_function_2nd.py
|
b0b5df2ea3a317f5bc58267771c1524dd860305c
|
[] |
no_license
|
Jade2290/bigdata_class
|
0c851440852857ee44496b7112db580cf9b60d57
|
380ad58d56ea4fbcea81f78f9648b1edf27e0554
|
refs/heads/master
| 2022-04-11T05:05:25.429853
| 2020-03-27T14:30:36
| 2020-03-27T14:30:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 14 11:48:50 2019
@author: sundooedu
"""
list1=list(range(0,10))
list2=list(filter(lambda x:x%2==0,list1))
list2
list3=list(filter(lambda x:x%2==0 and x%3==0,list1))
def factorial(s):
factor=1
for i in list(range(1,s+1)):
factor *= i
return factor
factorial(5)
list4=list(filter(lambda x: x*3 if x%3==0, list1))
from functools import reduce
factorial=reduce(lambda x,y:x*y,lst5)
lst5=list(range(9,0,-1))
def factorial(x,y):
return x*y
reduce(factorial,lst5)
#%%
bool(0) # '0'만 False 나머지 숫자들은 전부 True (음수도 True)
#%%
bool([]) # list가 비어있으면 False
#%%
bool('') # 문자가 비어있으면 False
#%%
# Q.01
def is_odd(number):
if number%2==0:
answer="짝수"
else:
answer="홀수"
print(answer)
def is_odds(number):
answer=(lambda x : "짝수" if (x%2==0) else "홀수")(number)
print(answer)
is_odds(5)
#%%
# Q.02
def input_avg(*number):
a=(lambda x: x+=i for i in number)
input_avg(input('숫자를 입력하세요 : '))
#%%
# Q.03
input1 = int(input("첫번째 숫자를 입력하세요:"))
input2 = int(input("두번째 숫자를 입력하세요:"))
total = input1 + input2
print("두 수의 합은 %s 입니다" % total)
#%%
# Q.04
#print("you" "need" "python")
#print("you"+"need"+"python")
print("you", "need", "python")
#print("".join(["you", "need", "python"]))
#%%
# Q.05
with open("test.txt", 'w') as f1:
f1.write("Life is too short")
with open("test.txt", 'r') as f2:
line= f2.read()
print(line)
#%%
# Q.06
from datetime import datetime
def writeDiary():
fstr='%Y%m%d'
today_str=datetime.strftime(datetime.now(),fstr)
file_name= today_str +'.diary'
lst=[]
#total_line='' #입력된 모든줄
while True:
line = input('내용을 입력하세요:')
if(line == '!quit'):break
#total_line += line + "\n"
lst.append(line+'\n')
with open(file_name,'w',encoding='utf-8') as f:
#f.write(total_line)
f.writelines(lst)
#f.close()
print('오늘자 파일'+'('+ file_name +')'+'생성완료')
def readDiary():
filename = input('일기파일명 : ')
with open(filename,'r',encoding='utf-8') as fp:
#data=fp.read()
data=fp.readlines()
print("파일내용 : ",data)
#fp.close()
def errorDiary():
print('미존재 메뉴번호')
menu='읽기 1 쓰기 2 : '
menu_bunho=input(menu)
if menu_bunho == '1' :
readDiary()
elif menu_bunho == '2' :
writeDiary()
else:
errorDiary()
|
[
"noreply@github.com"
] |
Jade2290.noreply@github.com
|
fbc6323c971bfc05403d49f31975f0959e172e9e
|
ae8a1d8f23ed08fcc14ecc9a6651cd738790ac00
|
/tests/func/test_fetchdata.py
|
a4f6b1013135688f78c6a4ae54adee32aeb7ddcd
|
[] |
no_license
|
murakami10/crawling-naist-lecture
|
438ef9a6311630178641d2534b553e6ba20a8b5b
|
7e4345983f452f20e9ba03495c42b2e922cb56f7
|
refs/heads/main
| 2023-05-19T10:45:34.863849
| 2021-06-08T09:17:47
| 2021-06-08T09:17:47
| 367,010,980
| 0
| 0
| null | 2021-06-08T09:17:48
| 2021-05-13T10:16:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,696
|
py
|
import threading
import time
from http.server import HTTPServer, SimpleHTTPRequestHandler
import pytest
import requests
from src.crawling_naist_syllabus.fetch import FetchData
from src.crawling_naist_syllabus.structure import Lecture
@pytest.fixture(scope="session")
def fetch_and_save_syllabus_html(tmpdir_factory):
"""
naistのシラバスを取得し、一時ディレクトリに保存する
:return syllabus.htmlが存在するdirectoryを返す
"""
syllabus_directory = tmpdir_factory.mktemp("syllabus_directory")
response = requests.get("https://syllabus.naist.jp/subjects/preview_list")
syllabus_file = syllabus_directory.join("syllabus.html")
syllabus_file.write(response.content)
# 実際のサイトにスクレイピングするため、アクセスの間隔をあける
time.sleep(1)
response = requests.get("https://syllabus.naist.jp/subjects/preview_detail/666")
detail_file = syllabus_directory.join("detail_1.html")
detail_file.write(response.content)
return syllabus_file.dirpath()
@pytest.fixture(scope="session")
def start_http_server():
"""
現在のdirectory配下を公開する
"""
host, port = ("127.0.0.1", 8888)
url = f"http://{host}:{port}/tests/index.html"
server = HTTPServer((host, port), SimpleHTTPRequestHandler)
thred = threading.Thread(target=server.serve_forever)
thred.start()
yield url
server.shutdown()
thred.join()
@pytest.fixture(scope="session")
def start_http_server_with_specific_directory(fetch_and_save_syllabus_html):
"""
指定したdirectoryをlocalhostで公開する
:param fetch_and_save_syllabus_html 公開するdirectory
"""
class HandlerWithDirectory(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
"""
指定したdirectoryを公開
"""
super().__init__(*args, directory=fetch_and_save_syllabus_html, **kwargs)
host, port = ("127.0.0.1", 8889)
server = HTTPServer((host, port), HandlerWithDirectory)
url = f"http://{host}:{port}/"
# スレッドの起動
thred = threading.Thread(target=server.serve_forever)
thred.start()
yield url
server.shutdown()
thred.join()
@pytest.fixture()
def fetch_data(start_http_server_with_specific_directory):
"""
FetchDataのインスタンスを返す
"""
fd = FetchData(start_http_server_with_specific_directory + "syllabus.html")
return fd
@pytest.mark.parametrize(
"invalid_url",
[
"http://127.0.0.1:8888/not_existed_index.html",
"httpaaaa",
],
)
def test_init_with_invalid_url(start_http_server, invalid_url):
with pytest.raises(Exception):
FetchData(invalid_url)
def test_init_with_valid_url(start_http_server):
try:
_ = FetchData(start_http_server)
except Exception:
pytest.fail("Exception raised")
general_lecture = Lecture(
name="技術と倫理",
url="http://127.0.0.1:8889/subjects/preview_detail/644",
)
introduction_lecture = Lecture(
name="情報理工学序論",
url="http://127.0.0.1:8889/subjects/preview_detail/662",
)
basic_lecture = Lecture(
name="情報科学基礎Ⅰ",
url="http://127.0.0.1:8889/subjects/preview_detail/791",
)
specialized_lecture = Lecture(
name="ソフトウェア工学",
url="http://127.0.0.1:8889/subjects/preview_detail/688",
)
@pytest.mark.parametrize(
"lecture_type, contained_data",
[
(FetchData.LECTURE_TYPE_GENERAL, general_lecture),
(FetchData.LECTURE_TYPE_INTRODUCTION, introduction_lecture),
(FetchData.LECTURE_TYPE_BASIC, basic_lecture),
(FetchData.LECTURE_TYPE_SPECIALIZED, specialized_lecture),
],
)
def test_scrape_name_and_url(fetch_data, lecture_type, contained_data):
name_and_url_list = fetch_data.scrape_name_and_url(lecture_type)
assert contained_data in name_and_url_list
def test_scrape_name_and_url_key_error(fetch_data):
with pytest.raises(KeyError):
fetch_data.scrape_name_and_url("key error")
def dummy_init(self, url):
pass
def test_scrape_detail_of_lecture(
start_http_server_with_specific_directory, monkeypatch
):
monkeypatch.setattr(FetchData, "__init__", dummy_init)
fetch_data = FetchData("url")
detail_url = start_http_server_with_specific_directory + "/detail_1.html"
lecture = Lecture(name="高性能計算基盤", url=detail_url)
lecture = fetch_data.scrape_detail(lecture)
assert 1 == lecture.details[0].number
assert "4/22 [2]" == lecture.details[0].date
assert "スーパスカラとVLIW (日本語教科書8章)" == lecture.details[0].theme
|
[
"m.kyoya777@gmail.com"
] |
m.kyoya777@gmail.com
|
809258fbebe5a4d58326b515a82977274a9a9cba
|
0bcd128368e2de959ca648960ffd7944067fcf27
|
/infra/bots/assets/protoc/create.py
|
e363cc5068230dadf10809450d4bff5a04c530b9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google/skia
|
ac6e39179cd33cf0c8a46d29c1a70bf78b4d74ee
|
bf6b239838d3eb56562fffd0856f4047867ae771
|
refs/heads/main
| 2023-08-31T21:03:04.620734
| 2023-08-31T18:24:15
| 2023-08-31T20:20:26
| 15,773,229
| 8,064
| 1,487
|
BSD-3-Clause
| 2023-09-11T13:42:07
| 2014-01-09T17:09:57
|
C++
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import subprocess
ZIP_URL = ('https://github.com/google/protobuf/releases/download/v3.3.0/'
'protoc-3.3.0-linux-x86_64.zip')
def create_asset(target_dir):
"""Create the asset."""
local_zip = '/tmp/protoc.zip'
subprocess.check_call(['curl', '-L', ZIP_URL, '-o', local_zip])
subprocess.check_call(['unzip', local_zip, '-d', target_dir])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
|
[
"skia-commit-bot@chromium.org"
] |
skia-commit-bot@chromium.org
|
154bb8fc3ec4ecff7d4664dd60a36d89d9e9c287
|
7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3
|
/atcoder/abc/abc034/d.py
|
39cae32435a6c76186d919f097f04446d71aa1b3
|
[] |
no_license
|
roiti46/Contest
|
c0c35478cd80f675965d10b1a371e44084f9b6ee
|
c4b850d76796c5388d2e0d2234f90dc8acfaadfa
|
refs/heads/master
| 2021-01-17T13:23:30.551754
| 2017-12-10T13:06:42
| 2017-12-10T13:06:42
| 27,001,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
# -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
N, K = map(int, raw_input().split())
w, p = [], []
for i in xrange(N):
wi, pi = map(int, raw_input().split())
w.append(wi)
p.append(pi)
wp = [[1.0*p[i]/w[i],i] for i in xrange(N)]
wp = sorted(wp, key = lambda x: x[0], reverse = True)
W, S = 0, 0
for i in xrange(K):
W += w[i]
S += p[i] * w[i]
print 1.0*S/W
|
[
"roiti46@gmail.com"
] |
roiti46@gmail.com
|
5293142db0238e500bc8e112bb402720c56a2c77
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-2/vse-naloge-brez-testov/DN5-Z-146.py
|
9e28868b4bb26ff8e9f3184e25bbf744f394f63e
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036
| 2018-12-06T22:56:38
| 2018-12-06T22:56:38
| 126,011,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
def unikati(s):
t = []
for i in s:
if i not in t:
t.append(i)
return t
def avtor(tvit):
a = tvit.split(": ")
return a[0]
def vsi_avtorji(tviti):
t = []
a = [i.split(': ')[0] for i in tviti]
for name in a:
if name not in t:
t.append(name)
return t
def izloci_besedo(beseda):
s = 0
z = 1
y = ""
x = ""
g = ""
for b in beseda:
if b.isalnum() == False:
s += 1
elif b.isalnum() == True:
break
y += beseda[s:]
for d in y[::-1]:
if d.isalnum() == False:
z += 1
elif d.isalnum() == True:
break
x += beseda[:-z]
for i in y:
if i in x:
g += i
return g
def se_zacne_z(tvit, c):
n = []
a = tvit.split(" ")
while (True):
for i in a:
if i.isalnum() == False and i[0][:1] == c:
n.append(i[1:])
for d in n:
if d.isalnum() == False:
n.append(d[:-1])
n.remove(d)
n.sort()
return n
def zberi_se_zacne_z(tviti, c):
n = []
s = []
a = [i.split(' ') for i in tviti]
for e in a:
for d in e:
if d[0] == c:
n.append(d[1:])
for k in n:
if k.isalnum() == False:
n.append(k[:-1])
n.remove(k)
for i in n:
if i not in s:
s.append(i)
return s
def vse_afne(tviti):
n = []
s = []
a = [i.split(" ") for i in tviti]
while (True):
for tvit in a:
for e in tvit:
if e[0] == "@":
n.append(e[1:])
for d in n:
if d.isalnum() == False:
n.append(d[:-1])
n.remove(d)
for i in n:
if i not in s:
s.append(i)
break
return s
def vsi_hashtagi(tviti):
a = [i.split(" ") for i in tviti]
n = []
s = []
while (True):
for tvit in a:
for e in tvit:
if e[0] == "#":
n.append(e[1:])
for d in n:
if d.isalnum() == False:
n.append(d[:-1])
n.remove(d)
for i in n:
if i not in s:
s.append(i)
break
return s
def vse_osebe(tviti):
a = vse_afne(tviti)
b = vsi_avtorji(tviti)
return sorted(unikati(a+b))
|
[
"benjamin.fele@gmail.com"
] |
benjamin.fele@gmail.com
|
7c7c490a043db3015b8dbbef12cc43020cbffd1a
|
67dd5749b247915ce7a0d3d95964e30503c4aa0c
|
/dev/getting_indexing_right.py
|
2c5e5b0945cafd7cd5a79d587531fd4bb7a72c15
|
[] |
no_license
|
yddream/timspy
|
d74d78825844b69ed9730373809e3f09ab52060c
|
034788db83d85dfca01fa31281a6de391ea2fe23
|
refs/heads/master
| 2022-11-11T13:10:48.880175
| 2020-06-23T07:38:21
| 2020-06-23T07:38:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
"""How to get the data."""
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from timspy.timspy import TimsDIA
from timspy.plot import plot_spectrum
from timsdata import TimsData
# plt.style.use('dark_background')
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 500)
# I have my data here
p = Path('/home/matteo/Projects/bruker/BrukerMIDIA/MIDIA_CE10_precursor/20190912_HeLa_Bruker_TEN_MIDIA_200ng_CE10_100ms_Slot1-9_1_488.d')
# TD = TimsData(p) # timsdata does not support :
TD = TimsDIA(p) # timsdata does not support :
update_wrapper(TD.iter, TD.iter_arrays)
TD.iter.__getitem__
?TD.iter_arrays
TD[1:10,:]
next(TD.iter[1:10, 100:500])
list(TD.iter[1:10, 100:500])
TD[1:10, 100:500]
TD[1:100, 100]
list(TD.iter[1:100, 100])
TD[1:10, 100:500].dtype
TD[[10, 20, 30], 100:500]
TD[[10, 20, 30], [40, 49]]
TD[[10, 20, 30], [41, 60]]
TD[:20, [41, 60]]
TD[11552,10]
TD[11552:,10] # exception will be raised automatically!
TD[(i**2 for i in range(1,10)), 10:50]
|
[
"matteo.lacki@gmail.com"
] |
matteo.lacki@gmail.com
|
153eaf590327a3928e4f39de1f87e5e3b6434798
|
cb0e7d6493b23e870aa625eb362384a10f5ee657
|
/solutions/python3/0200.py
|
bf259566a4c651f2576a82f375668f59e4e04686
|
[] |
no_license
|
sweetpand/LeetCode-1
|
0acfa603af254a3350d457803449a91322f2d1a7
|
65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94
|
refs/heads/master
| 2022-11-14T07:01:42.502172
| 2020-07-12T12:25:56
| 2020-07-12T12:25:56
| 279,088,171
| 1
| 0
| null | 2020-07-12T15:03:20
| 2020-07-12T15:03:19
| null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
def dfs(i: int, j: int) -> None:
if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or visited[i][j] or grid[i][j] == '0':
return
visited[i][j] = True
dfs(i + 1, j)
dfs(i - 1, j)
dfs(i, j + 1)
dfs(i, j - 1)
if not grid:
return 0
m = len(grid)
n = len(grid[0])
ans = 0
visited = [[False] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if not visited[i][j] and grid[i][j] == '1':
ans += 1
dfs(i, j)
return ans
|
[
"walkccray@gmail.com"
] |
walkccray@gmail.com
|
493149b85cd8f8ac097c095a5d003bc8d8250e26
|
2f44cecd8fc447c9e2f2d9f55abdea36ebb40cc5
|
/84.py
|
ef3779fbdfec69cf469c9f29d365c5b3495f449a
|
[] |
no_license
|
yuzumei/leetcode
|
751a234b429131169e3eaf4594ffeb3b94f6ab34
|
b6708b03c92ec92e89fc7ecf13f1995dee346657
|
refs/heads/master
| 2023-07-28T05:48:53.192948
| 2021-09-11T06:16:07
| 2021-09-11T06:16:07
| 365,780,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
heights=[6,7,5,2,4,5,9,3]
def search(heights):
leftstack=[]
leftnum=[]
for i,num in enumerate(heights):
if not leftstack:
leftstack.append([num,i])
leftnum.append(-1)
else:
while leftstack:
if num<=leftstack[-1][0]:
leftstack.pop()
else:
break
if leftstack:
leftnum.append(leftstack[-1][1])
else:
leftnum.append(-1)
leftstack.append([num,i])
return leftnum
leftnum=search(heights)
rightnum=search(heights[::-1])[::-1]
print(leftnum,rightnum)
ans=-1
for i in range(len(heights)):
ans=max(ans,(len(heights)-2-rightnum[i]-leftnum[i])*heights[i])
print(ans)
'''遍历两次'''
def twoside(heights):
n=len(heights)
if n==0:
return 0
leftnum=[0]*n
rightnum=[n]*n
stack=[]
for i,num in enumerate(heights):
while stack and num<=stack[-1][1]:
temp=stack.pop()
rightnum[temp[0]]=i
leftnum[i]=stack[-1][0] if stack else -1
stack.append([i,num])
ans=max((rightnum[i]-leftnum[i]-1)*heights[i] for i in range(n))
return ans
print(twoside(heights))
|
[
"973802530@qq.com"
] |
973802530@qq.com
|
0b1ea520891319bd3ec29901ce458c89203a9974
|
15a992391375efd487b6442daf4e9dd963167379
|
/tests/runner.py
|
7356581365e84bd1ae22e702cf2f4a2df1dc1e59
|
[
"Apache-2.0"
] |
permissive
|
Bala93/MONAI
|
b0e68e1b513adcd20eab5158d4a0e5c56347a2cd
|
e0a7eff5066da307a73df9145077f6f1fec7a514
|
refs/heads/master
| 2022-08-22T18:01:25.892982
| 2022-08-12T18:13:53
| 2022-08-12T18:13:53
| 259,398,958
| 2
| 0
| null | 2020-04-27T17:09:12
| 2020-04-27T17:09:11
| null |
UTF-8
|
Python
| false
| false
| 5,491
|
py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import inspect
import os
import re
import sys
import time
import unittest
from monai.utils import PerfContext
results: dict = {}
class TimeLoggingTestResult(unittest.TextTestResult):
"""Overload the default results so that we can store the results."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timed_tests = {}
def startTest(self, test): # noqa: N802
"""Start timer, print test name, do normal test."""
self.start_time = time.time()
name = self.getDescription(test)
self.stream.write(f"Starting test: {name}...\n")
super().startTest(test)
def stopTest(self, test): # noqa: N802
"""On test end, get time, print, store and do normal behaviour."""
elapsed = time.time() - self.start_time
name = self.getDescription(test)
self.stream.write(f"Finished test: {name} ({elapsed:.03}s)\n")
if name in results:
raise AssertionError("expected all keys to be unique")
results[name] = elapsed
super().stopTest(test)
def print_results(results, discovery_time, thresh, status):
# only keep results >= threshold
results = dict(filter(lambda x: x[1] > thresh, results.items()))
if len(results) == 0:
return
print(f"\n\n{status}, printing completed times >{thresh}s in ascending order...\n")
timings = dict(sorted(results.items(), key=lambda item: item[1]))
for r in timings:
if timings[r] >= thresh:
print(f"{r} ({timings[r]:.03}s)")
print(f"test discovery time: {discovery_time:.03}s")
print(f"total testing time: {sum(results.values()):.03}s")
print("Remember to check above times for any errors!")
def parse_args():
parser = argparse.ArgumentParser(description="Runner for MONAI unittests with timing.")
parser.add_argument(
"-s", action="store", dest="path", default=".", help="Directory to start discovery (default: '%(default)s')"
)
parser.add_argument(
"-p",
action="store",
dest="pattern",
default="test_*.py",
help="Pattern to match tests (default: '%(default)s')",
)
parser.add_argument(
"-t",
"--thresh",
dest="thresh",
default=10.0,
type=float,
help="Display tests longer than given threshold (default: %(default)d)",
)
parser.add_argument(
"-v",
"--verbosity",
action="store",
dest="verbosity",
type=int,
default=1,
help="Verbosity level (default: %(default)d)",
)
parser.add_argument("-q", "--quick", action="store_true", dest="quick", default=False, help="Only do quick tests")
parser.add_argument(
"-f", "--failfast", action="store_true", dest="failfast", default=False, help="Stop testing on first failure"
)
args = parser.parse_args()
print(f"Running tests in folder: '{args.path}'")
if args.pattern:
print(f"With file pattern: '{args.pattern}'")
return args
def get_default_pattern(loader):
signature = inspect.signature(loader.discover)
params = {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
return params["pattern"]
if __name__ == "__main__":
# Parse input arguments
args = parse_args()
# If quick is desired, set environment variable
if args.quick:
os.environ["QUICKTEST"] = "True"
# Get all test names (optionally from some path with some pattern)
with PerfContext() as pc:
# the files are searched from `tests/` folder, starting with `test_`
files = glob.glob(os.path.join(os.path.dirname(__file__), "test_*.py"))
cases = []
for test_module in {os.path.basename(f)[:-3] for f in files}:
if re.match(args.pattern, test_module):
cases.append(f"tests.{test_module}")
else:
print(f"monai test runner: excluding tests.{test_module}")
tests = unittest.TestLoader().loadTestsFromNames(cases)
discovery_time = pc.total_time
print(f"time to discover tests: {discovery_time}s, total cases: {tests.countTestCases()}.")
test_runner = unittest.runner.TextTestRunner(
resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
)
# Use try catches to print the current results if encountering exception or keyboard interruption
try:
test_result = test_runner.run(tests)
print_results(results, discovery_time, args.thresh, "tests finished")
sys.exit(not test_result.wasSuccessful())
except KeyboardInterrupt:
print_results(results, discovery_time, args.thresh, "tests cancelled")
sys.exit(1)
except Exception:
print_results(results, discovery_time, args.thresh, "exception reached")
raise
|
[
"noreply@github.com"
] |
Bala93.noreply@github.com
|
988fcc3bad6c71902716794d5af98b5ed49ce94a
|
1f936103af336af6bbd335f45d6baa55c426922b
|
/monatbx/test_cov.py
|
fed4bc94b6147527eeec78e5faf7cafd539bf891
|
[] |
no_license
|
monarin/monatbx
|
2ec342d67f1fbccb82656218ffd136f2eb7d96ab
|
43f56974f811e5b2b0dcc428d4f9b36043ed9d04
|
refs/heads/master
| 2020-06-18T13:08:58.893701
| 2016-11-30T00:58:18
| 2016-11-30T00:58:18
| 75,136,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import numpy as np
from cctbx.array_family import flex
G = [5, 6, 0.1, 20, 16, 12, 11, 10, 11.5, 15]
B = [100, 80, 200, 60, 70, 80, 85, 90, 70, 40]
rotx = [0.01, 0.002, 0.001, 0.05, 0.1, 0.025, 0.008, 0.01, 0.002, 0.001]
X = np.array([G, B, rotx])
print X
COV = np.cov(X)
print COV
CORR = np.correlate(X)
print CORR
|
[
"monarin@gmail.com"
] |
monarin@gmail.com
|
dca8dd72172381372dcb94f00fbeecad81c8ddd6
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg_old/scg_Route/test_c37280.py
|
e1f0802b3947f00158ecb0ded8f58e68ea24a2c8
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,359
|
py
|
import pytest
import time
import sys
from page_obj.common.rail import *
from os.path import dirname, abspath
from page_obj.common.ssh import *
from page_obj.scg.scg_def_static_route import *
from page_obj.scg.scg_def_interface import *
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = 37280
# 验证批量删除条目、单条删除路由
def test_route_wxw(browser):
try:
# 验证批量删除路由
login_web(browser, url="10.2.2.83")
add_static_route_single_wxw(browser, ip='20.1.1.0', mask='24', out_device='ge0/2', gateway='13.1.1.1',
enable='yes')
add_static_route_single_wxw(browser, ip='21.1.1.0', mask='24', out_device='ge0/2', gateway='13.1.1.1',
enable='yes')
login_web(browser, url="10.2.2.81")
change_physical_interface_workmode_wxw(browser, interface='ge0/5',
route="yes", ip='21.1.1.1', mask='24',
trans='no')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 20.1.1.1')
result1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result1)
ssh.close()
ssh = SSH("10.1.1.212", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 21.1.1.1')
result2 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result2)
ssh.close()
login_web(browser, url="10.2.2.83")
# 删除两段路由
del_static_route_single_wxw(browser, destination1='20.1.1.0/255.255.255.0',
destination2='21.1.1.0/255.255.255.0')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
result1_1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result1_1)
ssh.close()
ssh = SSH("10.1.1.212", 'root', 'root', 22)
ssh.connect()
result2_1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result2_1)
ssh.execute('route del -net 13.1.1.0/24 gw 21.1.1.1')
ssh.close()
# 验证单条删除路由
login_web(browser, url="10.2.2.83")
add_static_route_single_wxw(browser, ip='20.1.1.0', mask='24', out_device='ge0/2', gateway='13.1.1.1',
enable='yes')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 20.1.1.1')
result3 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result3)
ssh.close()
del_ipv4_static_route_bydestination(browser, destination='20.1.1.0/255.255.255.0')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 20.1.1.1')
result3_1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result3_1)
ssh.execute('route del -net 13.1.1.0/24 gw 21.1.1.1')
ssh.close()
login_web(browser, url="10.2.2.81")
change_physical_interface_workmode_wxw(browser, interface='ge0/5',
route="no", ip='21.1.1.1', mask='24',
trans='yes')
try:
assert "ttl" in result1
assert "ttl" in result2
assert "100% packet loss" in result1_1
assert "100% packet loss" in result2_1
assert "ttl"in result3
assert "100% packet loss" in result3_1
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "ttl" in result1
assert "ttl" in result2
assert "100% packet loss" in result1_1
assert "100% packet loss" in result2_1
assert "ttl" in result3
assert "100% packet loss" in result3_1
except Exception as err:
# 如果上面的步骤有报错,重新设备,恢复配置
print(err)
rail_fail(test_run_id, test_id)
reload()
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c"+str(test_id)+".py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
4605daa21593dccfe9560412c3fb87185a00fb91
|
726754863696235c66c8ed9aa184fc13ade33afe
|
/basics/rotation_count.py
|
116e425096403014edc34a74e8a6695ac5528788
|
[] |
no_license
|
harshalms/python
|
ef6a3eeb93c5051528cb0b76fd600a2943e10616
|
bfea8f00795c4308b09d80852cb995a8109c1568
|
refs/heads/master
| 2021-07-17T18:52:36.053780
| 2020-07-25T15:45:41
| 2020-07-25T15:45:41
| 189,624,661
| 0
| 0
| null | 2019-06-17T05:37:24
| 2019-05-31T16:20:52
|
Python
|
UTF-8
|
Python
| false
| false
| 461
|
py
|
'''GeeksForGeeks
Find the Rotation Count in Rotated Sorted array
Consider an array of distinct numbers sorted in increasing order.
The array has been rotated (clockwise) k number of times. Given such an array,
find the value of k.
Approch : Just find the index of minimum element.
'''
A = [15, 18, 2, 3, 6, 12]
def indexOFmin(A):
min = A[0]
for i in range(len(A)):
if min > A[i]:
min, k = A[i], i
return k
print(indexOFmin(A))
|
[
"harshal95iitk@gmail.com"
] |
harshal95iitk@gmail.com
|
c4194b35b0c4d19fbf61ada823bfe4a80aa83e71
|
40f4908483b98fc4f370ff4f2d520e1284d045b3
|
/phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/resources/memory/physicalmemoryresource.py
|
9be4f40b65705d8717bea20004f42fc927ec8dd1
|
[] |
no_license
|
TF-185/bbn-immortals
|
7f70610bdbbcbf649f3d9021f087baaa76f0d8ca
|
e298540f7b5f201779213850291337a8bded66c7
|
refs/heads/master
| 2023-05-31T00:16:42.522840
| 2019-10-24T21:45:07
| 2019-10-24T21:45:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
from pymmortals.generated.com.securboration.immortals.ontology.property.property import Property
from pymmortals.generated.com.securboration.immortals.ontology.resources.memory.memoryresource import MemoryResource
from pymmortals.generated.com.securboration.immortals.ontology.resources.memory.memorytype import MemoryType
from typing import List
# noinspection PyPep8Naming
class PhysicalMemoryResource(MemoryResource):
_validator_values = dict()
_types = dict()
def __init__(self,
canRead: bool = None,
canWrite: bool = None,
humanReadableDescription: str = None,
maxAvailableBytes: int = None,
memoryType: MemoryType = None,
resourceProperty: List[Property] = None):
super().__init__(canRead=canRead, canWrite=canWrite, humanReadableDescription=humanReadableDescription, maxAvailableBytes=maxAvailableBytes, resourceProperty=resourceProperty)
self.memoryType = memoryType
|
[
"awellman@bbn.com"
] |
awellman@bbn.com
|
9ebcfe440aadfcb8bf00181e22e0cfadd7c707ac
|
9b6a8923e783bd2641d7af3b118ff83f38c1de31
|
/review/list/list.py
|
db0572268d2cf805fbed1cf651fd1c590a2e4f44
|
[] |
no_license
|
mbrsagor/PyLearn
|
1c625698802fc5325ea06b754dc9b80d716d9f31
|
94e68f10efd1f5b1a26d1fd965a29dbbe6c2253d
|
refs/heads/master
| 2023-02-25T05:50:23.530150
| 2021-01-31T19:09:01
| 2021-01-31T19:09:01
| 263,316,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
students = [
{
"name": "Jacob Martin",
"father name": "Ros Martin",
"Address": "123 Hill Street",
}, {
"name": "Angela Stevens",
"father name": "Robert Stevens",
"Address": "3 Upper Street London",
}, {
"name": "Ricky Smart",
"father name": "William Smart",
"Address": "Unknown",
}
]
names_list = [student['name'] for student in students]
print(names_list)
f = ["Mango", "Apple", "Orange"]
a, b, c = f
print(a)
print(b)
print(c)
|
[
"mbrsagor@gmail.com"
] |
mbrsagor@gmail.com
|
544691d1ddac1a2ff9e0419bfc69e8b15f00a0b1
|
a8c0867109974ff7586597fe2c58521277ab9d4d
|
/LC648.py
|
e3823a187d57e17a47a6641bfc77b27bfb8ab450
|
[] |
no_license
|
Qiao-Liang/LeetCode
|
1491b01d2ddf11495fbc23a65bb6ecb74ac1cee2
|
dbdb227e12f329e4ca064b338f1fbdca42f3a848
|
refs/heads/master
| 2023-05-06T15:00:58.939626
| 2021-04-21T06:30:33
| 2021-04-21T06:30:33
| 82,885,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
class Solution(object):
def replaceWords(self, dict, sentence):
"""
:type dict: List[str]
:type sentence: str
:rtype: str
"""
sen_list = sentence.split(' ')
for idx, word in enumerate(sen_list):
min_root = word
len_word = len(word)
for root in dict:
len_root = len(root)
if len_root < len(min_root) and len_root <= len_word and word[:len_root] == root:
min_root = root
sen_list[idx] = min_root
return ' '.join(sen_list)
sol = Solution()
# dict = ["cat", "bat", "rat"]
# sentence = "the cattle was rattled by the battery"
dict = ["a", "b", "c"]
sentence = "aadsfasf absbs bbab cadsfafs"
print(sol.replaceWords(dict, sentence))
|
[
"qiaoliang@Qiaos-MacBook-Pro.local"
] |
qiaoliang@Qiaos-MacBook-Pro.local
|
a8c5c7e3b1280c05481bf5cf00de1d61e37f5aa5
|
923d035a4762a19b30d5900db91143a83837ae70
|
/ichnaea/async/config.py
|
1fd35092f9eaed4de2aaf9188da99570999ed30d
|
[
"Apache-2.0"
] |
permissive
|
voolitels/ichnaea
|
d5d5da34cb30b3e0c85675e32dab3972cc31d7b0
|
bd0350fcba9efb0bad3957309ed3a471ae07e41b
|
refs/heads/master
| 2021-01-17T14:21:16.056481
| 2015-11-10T16:38:22
| 2015-11-10T16:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,402
|
py
|
"""
Contains celery specific one time configuration code.
"""
import os
from kombu import Queue
from kombu.serialization import register
from ichnaea.async.schedule import CELERYBEAT_SCHEDULE
from ichnaea.cache import configure_redis
from ichnaea.config import read_config
from ichnaea import internaljson
from ichnaea.db import configure_db
from ichnaea.geoip import configure_geoip
from ichnaea.log import (
configure_raven,
configure_stats,
)
from ichnaea.queue import (
DataQueue,
ExportQueue,
)
CELERY_QUEUES = (
Queue('celery_cell', routing_key='celery_cell'),
Queue('celery_default', routing_key='celery_default'),
Queue('celery_export', routing_key='celery_export'),
Queue('celery_incoming', routing_key='celery_incoming'),
Queue('celery_monitor', routing_key='celery_monitor'),
Queue('celery_ocid', routing_key='celery_ocid'),
Queue('celery_reports', routing_key='celery_reports'),
Queue('celery_upload', routing_key='celery_upload'),
Queue('celery_wifi', routing_key='celery_wifi'),
) #: List of :class:`kombu.Queue` instances.
register('internal_json',
internaljson.internal_dumps,
internaljson.internal_loads,
content_type='application/x-internaljson',
content_encoding='utf-8')
def configure_celery(celery_app):
"""
Configure the celery app stored in :data:`ichnaea.async.app.celery_app`.
This is executed both inside the master worker process and once in
each forked worker process.
This parses the application ini and reads in the
:mod:`ichnaea.async.settings`.
"""
conf = read_config()
if conf.has_section('celery'):
section = conf.get_map('celery')
else: # pragma: no cover
# happens while building docs locally and on rtfd.org
return
# testing settings
always_eager = bool(os.environ.get('CELERY_ALWAYS_EAGER', False))
redis_uri = os.environ.get('REDIS_URI', 'redis://localhost:6379/1')
if always_eager and redis_uri:
broker_url = redis_uri
result_url = redis_uri
else: # pragma: no cover
broker_url = section['broker_url']
result_url = section['result_url']
celery_app.config_from_object('ichnaea.async.settings')
celery_app.conf.update(
BROKER_URL=broker_url,
CELERY_RESULT_BACKEND=result_url,
CELERY_QUEUES=CELERY_QUEUES,
CELERYBEAT_SCHEDULE=CELERYBEAT_SCHEDULE,
)
def configure_data(redis_client):
"""
Configure fixed set of data queues.
"""
data_queues = {
'update_cell': DataQueue('update_cell', redis_client,
queue_key='update_cell'),
'update_cellarea': DataQueue('update_cellarea', redis_client,
queue_key='update_cellarea'),
'update_cellarea_ocid': DataQueue('update_cellarea_ocid', redis_client,
queue_key='update_cellarea_ocid'),
'update_score': DataQueue('update_score', redis_client,
queue_key='update_score'),
}
for shard_id in ('ne', 'nw', 'se', 'sw'):
name = 'update_datamap_' + shard_id
data_queues[name] = DataQueue(name, redis_client, queue_key=name)
for shard_id in ['%x' % i for i in range(16)]:
name = 'update_wifi_' + shard_id
data_queues[name] = DataQueue(
name, redis_client, queue_key=name)
return data_queues
def configure_export(redis_client, app_config):
"""
Configure export queues, based on the `[export:*]` sections from
the application ini file.
"""
export_queues = {}
for section_name in app_config.sections():
if section_name.startswith('export:'):
section = app_config.get_map(section_name)
name = section_name.split(':')[1]
export_queues[name] = ExportQueue(name, redis_client, section)
return export_queues
def init_worker(celery_app, app_config,
_db_rw=None, _db_ro=None, _geoip_db=None,
_raven_client=None, _redis_client=None, _stats_client=None):
"""
Configure the passed in celery app, usually stored in
:data:`ichnaea.async.app.celery_app`.
Does connection, settings and queue setup. Attaches some
additional functionality to the :class:`celery.Celery` instance.
This is executed inside each forked worker process.
The parameters starting with an underscore are test-only hooks
to provide pre-configured connection objects.
:param _db_ro: Ignored, read-only database connection isn't used.
"""
# make config file settings available
celery_app.settings = app_config.asdict()
# configure outside connections
celery_app.db_rw = configure_db(
app_config.get('database', 'rw_url'), _db=_db_rw)
celery_app.raven_client = raven_client = configure_raven(
app_config.get('sentry', 'dsn'),
transport='threaded', _client=_raven_client)
celery_app.redis_client = redis_client = configure_redis(
app_config.get('cache', 'cache_url'), _client=_redis_client)
celery_app.stats_client = configure_stats(
app_config, _client=_stats_client)
celery_app.geoip_db = configure_geoip(
app_config.get('geoip', 'db_path'), raven_client=raven_client,
_client=_geoip_db)
# configure data / export queues
celery_app.all_queues = all_queues = set([q.name for q in CELERY_QUEUES])
celery_app.data_queues = data_queues = configure_data(redis_client)
for queue in data_queues.values():
if queue.monitor_name:
all_queues.add(queue.monitor_name)
celery_app.export_queues = configure_export(redis_client, app_config)
for queue in celery_app.export_queues.values():
if queue.monitor_name:
all_queues.add(queue.monitor_name)
def shutdown_worker(celery_app):
"""
Close outbound connections and remove custom celery_app state.
This is executed inside each forked worker process.
"""
celery_app.db_rw.engine.pool.dispose()
del celery_app.db_rw
del celery_app.raven_client
celery_app.redis_client.connection_pool.disconnect()
del celery_app.redis_client
del celery_app.stats_client
del celery_app.all_queues
del celery_app.data_queues
del celery_app.export_queues
del celery_app.settings
|
[
"hanno@hannosch.eu"
] |
hanno@hannosch.eu
|
c1763ba5175c207c332aa37cf46bb1aa24f588dd
|
a82aa8430e32eaf62df0f44b20afb0e7d50c3d7b
|
/ippon/group_phase/serializers.py
|
ed76683a6121808cf8e4196a5a8a7ebad298988f
|
[
"MIT"
] |
permissive
|
morynicz/ippon_back
|
314daac99f79247b749dc46d59a645a6eb840263
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
refs/heads/master
| 2022-12-20T23:33:10.898738
| 2021-10-17T09:25:39
| 2021-10-17T09:25:39
| 124,851,931
| 0
| 2
|
MIT
| 2022-12-08T12:37:26
| 2018-03-12T07:43:17
|
Python
|
UTF-8
|
Python
| false
| false
| 307
|
py
|
from rest_framework import serializers
import ippon.models
class GroupPhaseSerializer(serializers.ModelSerializer):
class Meta:
model = ippon.models.group_phase.GroupPhase
fields = (
'id',
'tournament',
'fight_length',
'name'
)
|
[
"morynicz@gmail.com"
] |
morynicz@gmail.com
|
4f613303615c55d2729147ae5cb8c6cd97c4ca83
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02383/s686099688.py
|
8350ca148ab207ab25ea1b72f0d85043ae5cfc38
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
dice = input().split()
direction = list(input())
dice2 = []
for i in range(len(direction)):
dice2 = dice
if direction[i] == 'E':
dice = [dice2[3],dice2[1],dice2[0],dice2[5],dice2[4],dice2[2]]
elif direction[i] == 'N':
dice = [dice2[1],dice2[5],dice2[2],dice2[3],dice2[0],dice2[4]]
elif direction[i] == 'S':
dice = [dice2[4],dice2[0],dice2[2],dice2[3],dice2[5],dice2[1]]
else:
dice = [dice2[2],dice2[1],dice2[5],dice2[0],dice2[4],dice2[3]]
print('{0}'.format(int(dice[0])))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a778be45a738c428c09f03ec65096a832a8df811
|
b66c83dbdb1181d3274cfb309637c0bdf590553f
|
/build/extrinsic_Industrial/intelligent_actuator/robo_cylinder/catkin_generated/pkg.develspace.context.pc.py
|
50404616a97e27fc816577449f0fc4dc7749b87a
|
[] |
no_license
|
Sinchiguano/repo_project
|
9079c80f6544cbe39902c68f61f421bd7cfd55e6
|
666da1d6d91704302b69ec9e0b0d30db3a709f30
|
refs/heads/master
| 2020-04-30T18:06:16.162025
| 2019-04-25T18:47:58
| 2019-04-25T18:47:58
| 177,000,277
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/casch/yumi_depends_ws/devel/include".split(';') if "/home/casch/yumi_depends_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rospy;std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robo_cylinder"
PROJECT_SPACE_DIR = "/home/casch/yumi_depends_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"cesarsinchiguano@hotmail.es"
] |
cesarsinchiguano@hotmail.es
|
c4228442ea7bb005562f42293f54db22ffd4a496
|
85738a4cacd2a6d93c4487cf856c883c3d9d314a
|
/tests/web/settings.py
|
9a68dbff38a085e8bd59eb1e7053c05ef3ac066a
|
[
"Apache-2.0"
] |
permissive
|
Kitware/tangelo
|
cc0cb1372bc5728e0585f739a9412a58a5069069
|
470034ee9b3d7a01becc1ce5fddc7adc1d5263ef
|
refs/heads/develop
| 2023-08-28T11:57:57.909917
| 2016-01-25T15:56:18
| 2016-01-25T15:56:18
| 6,885,877
| 40
| 21
|
Apache-2.0
| 2018-03-05T01:24:16
| 2012-11-27T15:38:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 320
|
py
|
import cherrypy
import tangelo
# This service reports the value of cherrypy's thread pool setting
def run(**kwargs):
if kwargs.get('pool'):
tangelo.util.set_server_setting('server.thread_pool', int(kwargs['pool']))
response = 'pool="%r"' % cherrypy.config.get('server.thread_pool')
return response
|
[
"roni.choudhury@kitware.com"
] |
roni.choudhury@kitware.com
|
718d0f430682a0b47ec3c0d95755ecd74fa8612a
|
7134e45563b2045837296cb5c4f1974a025e4f2b
|
/.history/MathmeticPracticeTimeLimit_20200411174234.py
|
16d18819de876f7987bf70910f5d9fe1a1e15342
|
[] |
no_license
|
Nordenbox/Nordenbox_Python_Fundmental
|
dca175c471ac2c64453cc4bcf291dd0773be4add
|
9c79fd5d0dada580072b523d5aa1d72f996e3a22
|
refs/heads/master
| 2022-01-21T06:37:15.084437
| 2022-01-06T13:55:30
| 2022-01-06T13:55:30
| 240,154,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,601
|
py
|
import random
import time
import os
import subprocess
def mode_choice():
print('请选用练习模式:\n1,选择练习题目数。\n')
print('2, 选择练习时间')
choice_num = int(input('您的选择: '))
if choice_num == 1:
multipile_issues()
else:
multipile_time()
def multipile_issues():
practicese_times = int(input('您要做几道题: '))
for i in range(practicese_times):
practicese_issues()
print("正确为%d,错误为%d。" % (practicese_issues.corrected, practicese_issues.wrong),
"你的分数是%d分" % (practicese_issues.corrected / practicese_times * 100))
if practicese_issues.sum_wrong_list != [] and practicese_issues.minos_wrong_list != []:
print("错误的题目是:\n", practicese_issues.sum_wrong_list,
"\n", practicese_issues.minos_wrong_list)
elif practicese_issues.sum_wrong_list == [] and practicese_issues.minos_wrong_list != []:
print("错误的题目是:\n", practicese_issues.minos_wrong_list)
elif practicese_issues.sum_wrong_list != [] and practicese_issues.minos_wrong_list == []:
print("错误的题目是:\n", practicese_issues.sum_wrong_list)
def multipile_time():
pass
def practicese_issues():
orrected = 0
wrong = 0
#counting = howmuch
sum_wrong_list = []
minos_wrong_list = []
sum = 0
minos = 0
while True:
plused = random.randint(1, 20)
plus = random.randint(1, 20)
p = random.randint(0, 1)
if p == 1:
sum = int(input("%d+%d= " % (plused, plus)))
if sum == plused + plus:
corrected = corrected + 1
else:
sum_wrong_list.append("%d+%d= %d" % (plused, plus, sum))
wrong = wrong + 1
else:
if plused < plus:
minos = int(input("%d-%d= " % (plus, plused)))
if minos == plus - plused:
corrected = corrected + 1
else:
minos_wrong_list.append(
"%d-%d=%d " % (plus, plused, minos))
wrong = wrong + 1
else:
minos = int(input("%d-%d= " % (plused, plus)))
if minos == plused - plus:
corrected = corrected + 1
else:
minos_wrong_list.append(
"%d-%d=%d " % (plused, plus, minos))
wrong = wrong + 1
return corrected, wrong, sum_wrong_list, minos_wrong_list
mode_choice()
|
[
"nordenbox@gmail.com"
] |
nordenbox@gmail.com
|
6dcb30fec438ec2e9fff5f0b0626da1774055b61
|
653eaef652627b155569b5fe9ab9bb3607fc1e78
|
/alg/discriminative-jackknife/models/BNN.py
|
3a17799407f89d0061443791a187ff674aeeaeab
|
[
"BSD-3-Clause"
] |
permissive
|
IlyaTrofimov/mlforhealthlabpub
|
11ab86a83bd2ffd2574364a956b322b0c62406ae
|
190cbad2faae9e559ffe7a68143df7f747d70adc
|
refs/heads/main
| 2023-04-16T03:58:38.423288
| 2021-04-21T10:22:43
| 2021-04-21T10:22:43
| 358,528,623
| 0
| 0
|
NOASSERTION
| 2021-04-16T08:25:26
| 2021-04-16T08:25:25
| null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
import torch
import torch.nn as nn
class BNN(nn.Module):
def __init__(self, *layers):
super(BNN, self).__init__()
self.layers, self.params = [], nn.ParameterList()
for layer in layers:
self.layers.append(layer)
self.params.extend([*layer.parameters()]) # register module parameters
def forward(self, x, mode):
if mode == 'forward':
net_kl = 0
for layer in self.layers:
x, layer_kl = layer.forward(x, mode)
net_kl += layer_kl
return x, net_kl
else:
for layer in self.layers:
x = layer.forward(x, mode)
return x
def Forward(self, x, y, n_samples, type):
assert type in {'Gaussian', 'Softmax'}, 'Likelihood type not found'
# Sample N samples and average
total_kl, total_likelh = 0., 0.
for _ in range(n_samples):
out, kl = self.forward(x, mode='forward')
# Gaussian output (with unit var)
# lklh = torch.log(torch.exp(-(y - out) ** 2 / 2e-2) / math.sqrt(2e-2 * math.pi)).sum()
if type == 'Gaussian':
lklh = (-.5 * (y - out) ** 2).sum()
else: # softmax
lklh = torch.log(out.gather(1, y)).sum()
total_kl += kl
total_likelh += lklh
return total_kl / n_samples, total_likelh / n_samples
@staticmethod
def loss_fn(kl, lklh, n_batch):
return (kl / n_batch - lklh).mean()
|
[
"e.s.saveliev@gmail.com"
] |
e.s.saveliev@gmail.com
|
59b111e7c48d6899e63795c608a24e3d51ca5fb3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02706/s901477236.py
|
e5cd388ef56bc4369f68551299e42a2e436c4640
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
#import numpy as np
#import math
#from decimal import *
#from numba import njit
#@njit
def main():
N,M = map(int, input().split())
A = list(map(int, input().split()))
s = sum(A)
if s > N:
print(-1)
else:
print(N-s)
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
010427a5856703a295156243fe70f85976250e8c
|
2f17bb840634eab6f08a7bb488781f6951ce6b47
|
/AOJ_courses/ITP1_4_D.py
|
42e76330f54037ab4673edbc5a65b21db34f3da8
|
[] |
no_license
|
NHRD/Atcoderpractice
|
3d5c1175e147a0bdbacf46f51b23db1a1b2dea22
|
958835069c84791afa36d119298b742d53e86ae0
|
refs/heads/master
| 2022-12-15T17:30:10.310049
| 2020-09-19T13:39:07
| 2020-09-19T13:39:07
| 279,771,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
n = int(input())
nums = list(map(int, input().split()))
nums = sorted(nums)
sumnum = sum(nums)
print("{} {} {}" .format(nums[0], nums[len(nums)-1], sumnum))
|
[
"naohisa.harada@gmail.com"
] |
naohisa.harada@gmail.com
|
80e9eeda1efc064f19b56be3222ec30e6dd1564d
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5744014401732608_1/Python/feigao/main.py
|
9d1ebc3970ed5aa77f1c7df72eca82287fe1e3c1
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from functools import wraps, lru_cache
def io_wrapper(func):
@wraps(func)
def _func(in_file=None, out_file=None, lines_per_case=1):
in_buffers = []
if in_file is None:
while True:
try:
s = input()
if s.strip():
in_buffers.append(s.strip())
except:
break
else:
with open(in_file, 'r') as f:
in_buffers.extend([line.strip() for line in f.read().strip().splitlines()])
total_case_nums = int(in_buffers[0])
in_buffers = in_buffers[1:]
# print(in_buffers)
assert len(in_buffers) == total_case_nums * lines_per_case
out_buffers = []
for case_id in range(1, total_case_nums + 1):
case_result_str = func('\n'.join(in_buffers[(case_id - 1) * lines_per_case: case_id * lines_per_case]))
out_buffers.append('Case #{}: {}'.format(case_id, case_result_str))
if out_file is not None and os.path.exists(out_file):
print('Out file {} already exists!'.format(out_file), file=sys.stderr)
out_buffers = None
if out_file is None:
print('\n'.join(out_buffers))
else:
with open(out_file, 'w') as f:
f.write('\n'.join(out_buffers))
return _func
@io_wrapper
@lru_cache(maxsize=None)
def solution(line_str):
return "Answer Str"
@io_wrapper
def a(lines):
n, *parties = map(int, lines.split())
# print(n, parties)
resutls = []
total = sum(parties)
import string
names = string.ascii_uppercase[:n]
numbers = dict(zip(names, parties))
while total > 0:
m = max(numbers, key=lambda c: numbers[c])
resutls.append(m)
total -= 1
v = numbers[m]
if v == 1:
del numbers[m]
else:
numbers[m] = v - 1
if len(resutls) % 2 == 1:
resutls.insert(0, '')
return ' '.join(a + b for a, b in zip(resutls[::2], resutls[1::2]))
@io_wrapper
def b(lines):
# print(lines)
b, m = map(int, lines.split())
if m > 2 ** (b - 2):
return 'IMPOSSIBLE'
resp = 'POSSIBLE'
if m == 2 ** (b - 2):
matrix = [[1 if r < c else 0 for c in range(b)] for r in range(b)]
else:
matrix = [[1 if r < c < b - 1 else 0 for c in range(b)] for r in range(b)]
for r in range(b):
if m & (2 ** r):
matrix[r + 1][b - 1] = 1
return '\n'.join([resp] + [''.join(map(str, row)) for row in matrix])
def c():
pass
if __name__ == '__main__':
# solution()
# a('A-sample.txt', lines_per_case=2)
# a('A-small-attempt0.in', lines_per_case=2)
# a('A-large.in.txt', 'A-large.out.txt', lines_per_case=2)
# b('B-sample.txt')
# b('B-small-attempt0.in.txt', 'B-small-attempt0.out.txt')
b('B-large.in', 'B-large.out.txt')
pass
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
e9880143e1cf66275f3cb00db8e80924fd0897d1
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_create_protocol_request.py
|
ad11aa561d4c11a287678808d35c64ee2c118655
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,463
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class KeystoneCreateProtocolRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'idp_id': 'str',
'protocol_id': 'str',
'body': 'KeystoneCreateProtocolRequestBody'
}
attribute_map = {
'idp_id': 'idp_id',
'protocol_id': 'protocol_id',
'body': 'body'
}
def __init__(self, idp_id=None, protocol_id=None, body=None):
"""KeystoneCreateProtocolRequest - a model defined in huaweicloud sdk"""
self._idp_id = None
self._protocol_id = None
self._body = None
self.discriminator = None
self.idp_id = idp_id
self.protocol_id = protocol_id
if body is not None:
self.body = body
@property
def idp_id(self):
"""Gets the idp_id of this KeystoneCreateProtocolRequest.
身份提供商ID。
:return: The idp_id of this KeystoneCreateProtocolRequest.
:rtype: str
"""
return self._idp_id
@idp_id.setter
def idp_id(self, idp_id):
"""Sets the idp_id of this KeystoneCreateProtocolRequest.
身份提供商ID。
:param idp_id: The idp_id of this KeystoneCreateProtocolRequest.
:type: str
"""
self._idp_id = idp_id
@property
def protocol_id(self):
"""Gets the protocol_id of this KeystoneCreateProtocolRequest.
待注册的协议ID。
:return: The protocol_id of this KeystoneCreateProtocolRequest.
:rtype: str
"""
return self._protocol_id
@protocol_id.setter
def protocol_id(self, protocol_id):
"""Sets the protocol_id of this KeystoneCreateProtocolRequest.
待注册的协议ID。
:param protocol_id: The protocol_id of this KeystoneCreateProtocolRequest.
:type: str
"""
self._protocol_id = protocol_id
@property
def body(self):
"""Gets the body of this KeystoneCreateProtocolRequest.
:return: The body of this KeystoneCreateProtocolRequest.
:rtype: KeystoneCreateProtocolRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this KeystoneCreateProtocolRequest.
:param body: The body of this KeystoneCreateProtocolRequest.
:type: KeystoneCreateProtocolRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneCreateProtocolRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
18772b128b050a7ff3d1dabdbdd5d2091d72921c
|
2d94902e0367f364eabd038b4aa49ac34e1ebd47
|
/config.py
|
7e17e0f65019ab54b69a72e387746c5c258de67a
|
[
"Apache-2.0"
] |
permissive
|
qitianchan/Patap
|
25f642759698391c77527ed3f676a84fcf918023
|
e71bd95300d94f7e26f9d87e5bdb9f4c73175383
|
refs/heads/master
| 2021-01-20T20:03:52.546735
| 2016-07-27T12:01:31
| 2016-07-27T12:01:31
| 64,106,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
# Create dummy secrey key so we can use sessions
SECRET_KEY = '123456790'
# Create in-memory database
DATABASE_FILE = 'patap.sqlite'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_FILE
SQLALCHEMY_ECHO = True
# Flask-Security config
# SECURITY_URL_PREFIX = "/admin"
SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_PASSWORD_SALT = "ATGUOHAELKiubahiughaerGOJAEGj"
# Flask-Security URLs, overridden because they don't put a / at the end
SECURITY_LOGIN_URL = "/login/"
SECURITY_LOGOUT_URL = "/logout/"
SECURITY_REGISTER_URL = "/register/"
SECURITY_POST_LOGIN_VIEW = "/facilitator/"
SECURITY_POST_LOGOUT_VIEW = "/"
SECURITY_POST_REGISTER_VIEW = "/admin/"
# Flask-Security features
SECURITY_REGISTERABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
# Blueprint prefix
FACILITATOR_PREFIX = '/facilitator'
|
[
"qitianchan@sina.com"
] |
qitianchan@sina.com
|
e87ad93a3d3dcdf7cfea8ea52573b0f8fcc1a35a
|
54290d468f91a21f131458b136213354a3780a46
|
/fabfile.py
|
2f37bf95d517421a6878a2eb07b905c48515e92f
|
[
"WTFPL"
] |
permissive
|
LaPosteSNA/addok
|
9435cccd37e936c89c538caed08f5db04e6bd0eb
|
22a4e407c5a94d412673d223a25a8a3651801a71
|
refs/heads/master
| 2021-01-22T13:13:37.465954
| 2016-01-26T17:02:01
| 2016-01-26T17:02:01
| 45,954,224
| 1
| 0
| null | 2015-11-11T02:15:32
| 2015-11-11T02:15:31
| null |
UTF-8
|
Python
| true
| false
| 3,886
|
py
|
from fabric.api import cd, env, execute, hide, puts, roles, sudo, task
env.project_name = 'addok'
env.repository = 'https://github.com/etalab/addok.git'
env.local_branch = 'master'
env.remote_ref = 'origin/master'
env.requirements_file = 'requirements.txt'
env.use_ssh_config = True
env.shell = "/bin/bash -c" # Default uses -l option that we don't want.
env.virtualenv_dir = '/home/addok/.virtualenvs/addok'
env.project_dir = '/home/addok/src/'
env.restart_command = 'sudo service addok restart'
def run_as_addok(*args, **kwargs):
"""
Run command sudoing user `addok`.
"""
kwargs['user'] = "addok"
return sudo(*args, **kwargs)
# =============================================================================
# Tasks which set up deployment environments
# =============================================================================
@task
def dev():
"""
Use the dev deployment environment on Etalab servers.
You need the "banapidev" server to be referenced in your ~/.ssh/config
file.
"""
server = 'banapidev'
env.roledefs = {
'web': [server],
}
env.system_users = {server: 'addok'}
@task
def live():
"""
Use the live deployment environment on Etalab servers.
You need the "banapi" server to be referenced in your ~/.ssh/config file.
"""
server = 'banapi'
env.roledefs = {
'web': [server],
}
env.system_users = {server: 'addok'}
# Set the default environment.
dev()
# =============================================================================
# Actual tasks
# =============================================================================
@task
@roles('web')
def setup():
"""
Install the service (tested on Ubuntu 14.04).
"""
sudo('apt install redis-server python3.4-dev python-virtualenv python-pip '
'virtualenvwrapper')
# run_as_addok('source /usr/local/bin/virtualenvwrapper.sh')
run_as_addok('mkvirtualenv addok --python=/usr/bin/python3.4')
run_as_addok('git clone {repository} {project_dir}'.format(**env))
with cd(env.project_dir):
run_as_addok('pip install -r {requirements_file}'.format(**env))
@task
@roles('web')
def restart():
"""
Restart the web service.
"""
run_as_addok(env.restart_command)
@task
@roles('web')
def update(action='check'):
"""
Update the repository (server-side).
"""
with cd(env.project_dir):
remote, dest_branch = env.remote_ref.split('/', 1)
run_as_addok('git fetch {remote}'.format(
remote=remote, dest_branch=dest_branch, **env))
with hide('running', 'stdout'):
cmd = 'git diff-index --cached --name-only {remote_ref}'
changed_files = run_as_addok(cmd.format(**env)).splitlines()
if not changed_files and action != 'force':
# No changes, we can exit now.
return
run_as_addok('git merge {remote_ref}'.format(**env))
run_as_addok('find -name "*.pyc" -delete')
if action == "clean":
run_as_addok('git clean -df')
execute(install)
@task
@roles('web')
def install():
"""
Update the requirements.
"""
puts('Installing...')
cmd = '{virtualenv_dir}/bin/python setup.py develop'
run_as_addok(cmd.format(**env))
@task
@roles('web')
def shell():
cmd = "{virtualenv_dir}/bin/python /home/addok/src/run.py shell"
run_as_addok(cmd.format(virtualenv_dir=env.virtualenv_dir))
@task
def deploy(verbosity='normal'):
"""
Full server deploy.
Updates the repository (server-side) and restarts the web service.
"""
if verbosity == 'noisy':
hide_args = []
else:
hide_args = ['running', 'stdout']
with hide(*hide_args):
puts('Updating repository...')
execute(update)
puts('Restarting web server...')
execute(restart)
|
[
"yb@enix.org"
] |
yb@enix.org
|
b3b1665fb21f6233aa62577bc888715c6c87326f
|
e5a044708032b853f1cdf8906da63502716fd410
|
/test/test_acs_response.py
|
50ae8c13ac7310108d49f9d76f8061799b9c37d7
|
[] |
no_license
|
GBSEcom/Python
|
4b93bab80476051fc99f379f018ac9fa109a8a6a
|
5fa37dba8d0c3853686fdc726f863743376060c9
|
refs/heads/master
| 2021-12-04T12:55:29.605843
| 2021-11-19T22:01:03
| 2021-11-19T22:01:03
| 136,058,345
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
# coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.5.0.20211029.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.acs_response import ACSResponse # noqa: E501
from openapi_client.rest import ApiException
class TestACSResponse(unittest.TestCase):
"""ACSResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testACSResponse(self):
"""Test ACSResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.acs_response.ACSResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"emargules@bluepay.com"
] |
emargules@bluepay.com
|
763b3c41f750dca1f47c7fdc416ee621be024e3c
|
90cdae33e672b23a3ccb84dec0f281e78d3934ce
|
/auto_test_leke/web_test/src/common/method.py
|
038feba19e8b7600635d53982ce2dd7eead0e753
|
[] |
no_license
|
kuangtao94/TestHome
|
dffdb3737ab60f6db435c770c33f423d814b5594
|
46acedadd225b07fe73f43feebd5c66d19c7eeac
|
refs/heads/master
| 2020-06-29T07:38:37.316844
| 2019-11-24T02:14:25
| 2019-11-24T02:14:25
| 200,475,947
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
# coding:utf-8
from selenium import webdriver
import os
from selenium.webdriver.common.by import By #导入by定位
from selenium.webdriver.support.ui import WebDriverWait #导入显示等待包
from selenium.webdriver.support import expected_conditions as EC #导入期望条件
from logging import log #导入日志文件中的日志 类
# driver=webdriver.Chrome()
class public():
def __init__(self,driver): #创建对象时加入初始化参数,每次调用类时都会执行初始化参数
self.driver=driver
self.log=log('execute.log') #创建日志类对象,并初始化文件
def login(self,username,password):
self.input_text(By.NAME,"name",username)
self.input_text(By.NAME,"password",password)
self.click_element(By.NAME,"submit")
def logout(self):
self.click_element(By.XPATH,'')
def locat_element(self,*loc): #定义元素的方法
try:
element = WebDriverWait(self.driver,5,0.5).until(
EC.presence_of_element_located(loc)
)
return element
except:
self.log.error(u"元素找不到"+str(loc))
def input_text(self,a,b,text,clear=True): #输入框的方法
if clear:
try:
self.locat_element(a,b).clear()
self.locat_element(a,b).send_keys(text)
except:
self.log.error(u'文本输入失败'+str(text))
else:
try:
self.locat_element(a,b).send_keys(text)
except:
self.log.error(u'文本输入失败'+str(text))
def click_element(self,a,b): #点击元素的方法
try:
self.locat_element(a,b).click()
except:
self.log.error(u'点击失败'+str(b))
|
[
"1512500241@qq.com"
] |
1512500241@qq.com
|
94be3ed0169b5b1a099858c6d26cca996a1e3f6c
|
ee7ca0fed1620c3426fdfd22e5a82bba2a515983
|
/dsn_qc_pbsa/models/qc.py
|
49236e76f3e03ea571e2b17105d32c21ef6826d9
|
[] |
no_license
|
disna-sistemas/odoo
|
318d0e38d9b43bea56978fe85fc72850d597f033
|
0826091462cc10c9edc3cc29ea59c417f8e66c33
|
refs/heads/8.0
| 2022-03-08T19:01:21.162717
| 2022-02-15T13:06:26
| 2022-02-15T13:06:26
| 99,210,381
| 0
| 5
| null | 2019-07-24T08:49:58
| 2017-08-03T08:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from datetime import datetime
class dsnQcInspection(models.Model):
_inherit = "qc.inspection"
buf_date_analysis = fields.Date('Date of Analysis', readonly=True)
@api.multi
def action_confirm(self):
for inspection in self:
inspection.buf_date_analysis = datetime.now()
result = super(dsnQcInspection, self).action_confirm()
return result
|
[
"sistemas@disna.com"
] |
sistemas@disna.com
|
46823e51aef1425db664261b1bf8807eda1cf97f
|
ff21dd1b906db472584aa92a32c22fb9351c9ffd
|
/NOTE/02_PythonBase/day20/exercise/mycopy.py
|
84c9355038bbc272ced2e384557d1676f5064f61
|
[] |
no_license
|
Bertram-Liu/Note
|
0e176b2c9625f02e463b8f6be3587f1f0b873e9b
|
60a30b03ff5d41ab6233e6fd30074de396703b68
|
refs/heads/master
| 2020-07-18T18:14:24.920528
| 2019-09-04T11:55:59
| 2019-09-04T11:55:59
| 206,290,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
# 1. 写程序,实现复制文件的功能
# 要求:
# 1. 要考虑关闭文件的问题
# 2. 要考虑超大文件的问题
# 3. 要能复制二进制文件
def copy(src_file, dst_file):
'''src_file 源文件
dst_file 目标文件'''
# 以下实现复制
try:
with open(src_file, 'rb') as fr, \
open(dst_file, 'wb') as fw:
while True:
b = fr.read(4096)
if not b: # 到达文件尾
break
fw.write(b)
except OSError:
print("复制失败!")
src = input("请输入源文件名: ")
dst = input('请输入目标文件名: ')
copy(src, dst)
|
[
"bertram_liu@163.com"
] |
bertram_liu@163.com
|
b529d4e2ef137a416b1a7794a47c6e9eebffab3b
|
01ac9e40052a468dd472a296df0003c4e629e2c9
|
/news_all/spiders_ydyl/cppcc_all.py
|
4081ec07efd2853fce653816d12c6835d575b706
|
[] |
no_license
|
Pintrue/news_all
|
b5cee16584ed92e6574edd825b574214df65d917
|
eb8c32c79bdacd8e2f76b88f27871c3cd0118006
|
refs/heads/master
| 2022-03-23T13:34:10.354029
| 2019-11-22T07:40:50
| 2019-11-22T07:40:50
| 223,058,997
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from news_all.spider_models import NewsRCSpider
class Zgzxw_allSpider(NewsRCSpider):
"""中国政协网"""
name = 'zgzxw'
mystart_urls = {
'http://www.cppcc.gov.cn/zxww/newcppcc/zxyw/index.shtml': 7642, # 中国人民政治协商会议全国委员会
}
rules = (
# http://www.cppcc.gov.cn/zxww/2019/06/25/ARTI1561421709036136.shtml
Rule(LinkExtractor(allow=(r'cppcc.gov.cn.*?/\d{4}/\d{2}/\d{2}/\w+\d+.shtml'),
), callback='parse_item',
follow=False),
)
def parse_item(self, response):
xp = response.xpath
try:
title = xp("//div[@class='cnt_box']/h3/text()").extract_first()
content_div = xp("//div[@class='cnt_box']/div[@class='con']")[0]
pubtime = xp("//span[@class='info']/i").re(r'\d{2,4}-\d{1,2}-\d{1,2}')[0]
origin_name = xp("//span[@class='info']/em/text()[2]").extract_first()
content, media, _, _ = self.content_clean(content_div)
except:
return self.produce_debugitem(response, "xpath error")
return self.produce_item(
response=response,
title=title,
pubtime=pubtime,
origin_name=origin_name,
content=content,
media=media
)
|
[
"py416@ic.ac.uk"
] |
py416@ic.ac.uk
|
22fe6252c1d33a331415ffaf644c9dbdb687b865
|
99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6
|
/algorithm/IM/반나누기.py
|
0f16cc1db1f921f7c87b424d4ce293b0641550ca
|
[] |
no_license
|
HSx3/TIL
|
92acc90758015c2e31660617bd927f7f100f5f64
|
981c9aaaf09c930d980205f68a28f2fc8006efcb
|
refs/heads/master
| 2020-04-11T21:13:36.239246
| 2019-05-08T08:18:03
| 2019-05-08T08:18:03
| 162,099,042
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
import sys
sys.stdin = open("반나누기.txt")
T = int(input())
for tc in range(1, T+1):
N, Kmin, Kmax = map(int, input().split())
score = list(map(int, input().split()))
div_class = []
for T1 in range(1, 101):
for T2 in range(T1+1, 101):
A = []
B = []
C = []
for i in score:
if i >= T2:
A.append(i)
elif i < T1:
C.append(i)
else:
B.append(i)
num = [len(A), len(B), len(C)]
if max(num) <= Kmax and min(num) >= Kmin:
ans = max(num)-min(num)
div_class.append(ans)
if div_class:
print(min(div_class))
else:
print(-1)
|
[
"hs.ssafy@gmail.com"
] |
hs.ssafy@gmail.com
|
2294bcc3211c94ad16f0784191f9eb000b41fb76
|
23805cffc86ac4dfb5bcce672b8c7070b4616e41
|
/Apprendre-Python/sum-1-n/scripts/feedback.py
|
638e0fc434e034cca499389e17e3f7f853abd000
|
[] |
no_license
|
ukonline/pythia-tasks
|
f90ff90299fe0eedd0e2787bcf666df07c709a00
|
81a3731eb0cdfe16b26a4e75a165a5071fb48ff5
|
refs/heads/master
| 2021-01-25T03:26:33.915795
| 2016-01-04T20:03:24
| 2016-01-04T20:03:24
| 40,974,655
| 0
| 2
| null | 2016-12-21T13:12:14
| 2015-08-18T13:49:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Course: Apprendre Python
# Problem: Somme de 1 à n
# Feedback script
import ast
import csv
import json
import os
import sys
sys.path.append('/task/static')
from lib import pythia
import math
def computesum(n):
result = 0
i = 1
while i <= n:
result += i
i += 1
return result
class TaskFeedbackSuite(pythia.FeedbackSuite):
def __init__(self, config):
pythia.FeedbackSuite.__init__(self, '/tmp/work/output/stderr', None, '/tmp/work/input/data.csv', '/tmp/work/output/data.res', config)
def teacherCode(self, data):
return computesum(data)
def parseTestData(self, data):
return int(data[0])
# Retrieve task id
with open('/tmp/work/tid', 'r', encoding='utf-8') as file:
tid = file.read()
output = {'tid': tid, 'status': 'failed', 'feedback': {'score': 0}}
# Read test configuration
config = []
with open('/task/config/test.json', 'r', encoding='utf-8') as file:
content = file.read()
config = json.loads(content)
config = config['predefined']
(verdict, feedback) = TaskFeedbackSuite(config).generate()
output['feedback'] = feedback
output['status'] = 'success' if verdict else 'failed'
print(json.dumps(output))
|
[
"seb478@gmail.com"
] |
seb478@gmail.com
|
d7db13e9901dfdb2541b150c96b70055368e00ee
|
cf720b69d428b92186e84e52ff4f7eb39b8dd723
|
/Probablity and Statistics/3. WAP to find the probability of drawing an ace after drawing an ace on the first draw.py
|
d831885a30876fdd223cd5f274b1fb19dbec87e6
|
[] |
no_license
|
bl-deepakchawla/ML-Followship-Program
|
b0fd2232f6dd2ea4356e4402be86cca84a5fbd60
|
41d88172ea226c42c1f56fd9e59769142575734c
|
refs/heads/master
| 2020-04-01T13:56:11.595143
| 2018-10-31T10:28:19
| 2018-10-31T10:28:19
| 153,273,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
def pr_ace_after_ace_draw(l_ace_cards, l_total_cards):
l_pr_ace_card = (l_ace_cards/l_total_cards) * 100
return l_pr_ace_card
g_total_cards = 52
g_ace_draw = 1
g_total_cards = g_total_cards - g_ace_draw
g_ace_cards = 4 - g_ace_draw
g_pr_ace_card = pr_ace_after_ace_draw(g_ace_cards, g_total_cards)
print("Probability of the ace cards after drawing a ace from the packed card is", g_pr_ace_card, "%")
|
[
"deepakchawla35@gmail.com"
] |
deepakchawla35@gmail.com
|
8138e1872ba83e8b4e5232c1d3cc450e30f9a153
|
527cc44efaa5a2d738d638d76cf4737b37a0e27d
|
/fiepipe.py
|
3302504191367767067a54043f89092e1dc52f4e
|
[
"MIT"
] |
permissive
|
leith-bartrich/fiepipe
|
c98a978d81a24013a98bbae97c65ca053e9af481
|
2f48054a349059ec5919ff9402a02c03b27b5915
|
refs/heads/master
| 2021-04-06T13:52:51.391039
| 2019-06-25T21:01:39
| 2019-06-25T21:01:39
| 125,394,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
#!/usr/local/bin/python
import fiepipelib.localplatform.routines.localplatform
import fiepipelib.localuser.routines.localuser
import fiepipedesktoplib.shells.fiepipe
def main():
# TODO register fie.us and populate the public key from somewhere authoritative.
platform = fiepipelib.localplatform.routines.localplatform.get_local_platform_routines()
localuser = fiepipelib.localuser.routines.localuser.LocalUserRoutines(platform)
shell = fiepipedesktoplib.shells.fiepipe.Shell(localuser)
shell.cmdloop()
if __name__ == "__main__":
main()
|
[
"brad@fie.us"
] |
brad@fie.us
|
2f9ff8e134eaa96dab94a8adf57b27fb8a23be23
|
d6cf604d393a22fc5e071a0d045a4fadcaf128a6
|
/Challenge Book/JOI_2007_C.py
|
7e53612c30d66f59e862f33ab20efe583c393bc7
|
[] |
no_license
|
shikixyx/AtCoder
|
bb400dfafd3745c95720b9009881e07bf6b3c2b6
|
7e402fa82a96bc69ce04b9b7884cb9a9069568c7
|
refs/heads/master
| 2021-08-03T21:06:45.224547
| 2021-07-24T11:58:02
| 2021-07-24T11:58:02
| 229,020,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
import numpy as np
import sys
sys.setrecursionlimit(10 ** 7)
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
# 半分全列挙
N, M = map(int, readline().split())
P = np.array([0] + read().split(), np.int64)
P = P[P <= M]
P2 = (P[:, None] + P[None, :]).ravel()
P2 = P2[P2 <= M]
P2.sort()
# numpyで並列にやらないと間に合わない
I = np.searchsorted(P2, M-P2, side='right') - 1
P4 = P2 + P2[I]
print(max(P4))
|
[
"shiki.49.313@gmail.com"
] |
shiki.49.313@gmail.com
|
5aa39cd29ff6236b62f61c9c3f51364eea44b3a5
|
419db2c95082e57eab3ebea9568693c2a961add7
|
/pyverilog/dataflow/dataflow_analyzer.py
|
8136f89271efed5ed13e2089e74ec92349f3bf33
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hoangt/Pyverilog-1
|
ef309528150a3174e74126b2f1328ca9824fcbfe
|
d0cb60a5633e88f59a7c5c36dff8981f000ee525
|
refs/heads/master
| 2021-01-19T22:53:52.170548
| 2015-09-16T15:23:16
| 2015-09-16T15:23:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,027
|
py
|
#-------------------------------------------------------------------------------
# dataflow_analyzer.py
#
# Verilog module signal/module dataflow analyzer
#
# Copyright (C) 2013, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
#-------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) )
sys.setrecursionlimit(16 * 1024)
import pyverilog
import pyverilog.utils
import pyverilog.utils.version
from pyverilog.vparser.parser import VerilogCodeParser
from pyverilog.dataflow.modulevisitor import ModuleVisitor
from pyverilog.dataflow.signalvisitor import SignalVisitor
from pyverilog.dataflow.bindvisitor import BindVisitor
class VerilogDataflowAnalyzer(VerilogCodeParser):
def __init__(self, filelist, topmodule='TOP', noreorder=False, nobind=False,
preprocess_include=None,
preprocess_define=None):
self.topmodule = topmodule
self.terms = {}
self.binddict = {}
self.frametable = None
files = filelist if isinstance(filelist, tuple) or isinstance(filelist, list) else [ filelist ]
VerilogCodeParser.__init__(self, files,
preprocess_include=preprocess_include,
preprocess_define=preprocess_define)
self.noreorder = noreorder
self.nobind = nobind
def generate(self):
ast = self.parse()
module_visitor = ModuleVisitor()
module_visitor.visit(ast)
modulenames = module_visitor.get_modulenames()
moduleinfotable = module_visitor.get_moduleinfotable()
signal_visitor = SignalVisitor(moduleinfotable, self.topmodule)
signal_visitor.start_visit()
frametable = signal_visitor.getFrameTable()
if self.nobind:
self.frametable = frametable
return
bind_visitor = BindVisitor(moduleinfotable, self.topmodule, frametable,
noreorder=self.noreorder)
bind_visitor.start_visit()
dataflow = bind_visitor.getDataflows()
self.frametable = bind_visitor.getFrameTable()
self.terms = dataflow.getTerms()
self.binddict = dataflow.getBinddict()
def getFrameTable(self):
return self.frametable
#-------------------------------------------------------------------------
def getInstances(self):
if self.frametable is None: return ()
return self.frametable.getAllInstances()
def getSignals(self):
if self.frametable is None: return ()
return self.frametable.getAllSignals()
def getConsts(self):
if self.frametable is None: return ()
return self.frametable.getAllConsts()
def getTerms(self):
return self.terms
def getBinddict(self):
return self.binddict
if __name__ == '__main__':
from optparse import OptionParser
INFO = "Verilog module signal/module dataflow analyzer"
VERSION = pyverilog.utils.version.VERSION
USAGE = "Usage: python dataflow_analyzer.py -t TOPMODULE file ..."
def showVersion():
print(INFO)
print(VERSION)
print(USAGE)
sys.exit()
optparser = OptionParser()
optparser.add_option("-v","--version",action="store_true",dest="showversion",
default=False,help="Show the version")
optparser.add_option("-I","--include",dest="include",action="append",
default=[],help="Include path")
optparser.add_option("-D",dest="define",action="append",
default=[],help="Macro Definition")
optparser.add_option("-t","--top",dest="topmodule",
default="TOP",help="Top module, Default=TOP")
optparser.add_option("--nobind",action="store_true",dest="nobind",
default=False,help="No binding traversal, Default=False")
optparser.add_option("--noreorder",action="store_true",dest="noreorder",
default=False,help="No reordering of binding dataflow, Default=False")
(options, args) = optparser.parse_args()
filelist = args
if options.showversion:
showVersion()
for f in filelist:
if not os.path.exists(f): raise IOError("file not found: " + f)
if len(filelist) == 0:
showVersion()
verilogdataflowanalyzer = VerilogDataflowAnalyzer(filelist, options.topmodule,
noreorder=options.noreorder,
nobind=options.nobind,
preprocess_include=options.include,
preprocess_define=options.define)
verilogdataflowanalyzer.generate()
directives = verilogdataflowanalyzer.get_directives()
print('Directive:')
for dr in sorted(directives, key=lambda x:str(x)):
print(dr)
instances = verilogdataflowanalyzer.getInstances()
print('Instance:')
for module, instname in sorted(instances, key=lambda x:str(x[1])):
print((module, instname))
if options.nobind:
print('Signal:')
signals = verilogdataflowanalyzer.getSignals()
for sig in signals:
print(sig)
print('Const:')
consts = verilogdataflowanalyzer.getConsts()
for con in consts:
print(con)
else:
terms = verilogdataflowanalyzer.getTerms()
print('Term:')
for tk, tv in sorted(terms.items(), key=lambda x:str(x[0])):
print(tv.tostr())
binddict = verilogdataflowanalyzer.getBinddict()
print('Bind:')
for bk, bv in sorted(binddict.items(), key=lambda x:str(x[0])):
for bvi in bv:
print(bvi.tostr())
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
30e1bb070924905323da2b3adf333239477b7c6d
|
8bac6c63a7b826bfd6b415e6faa51ff22505d112
|
/openNFR-skin-2018/usr/lib/enigma2/python/Components/Renderer/NFRSambaVpnIp_renderer.py
|
fabdc9a68e371461e2f463086cbfa48cd40c720c
|
[] |
no_license
|
stein17/Skins-for-openNFR
|
03330102b7d883f8485297cea0468143d9116b6f
|
ca6c5d02035e4bacdad6efc45995249a317b7fb6
|
refs/heads/master
| 2023-03-12T19:58:50.112456
| 2022-03-21T07:02:03
| 2022-03-21T07:02:03
| 94,653,786
| 0
| 15
| null | 2023-03-02T17:52:37
| 2017-06-17T23:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 467
|
py
|
from Renderer import Renderer
from enigma import ePixmap
class NFRSambaVpnIp_renderer(Renderer):
def __init__(self):
Renderer.__init__(self)
GUI_WIDGET = ePixmap
def postWidgetCreate(self, instance):
self.changed((self.CHANGED_DEFAULT,))
def changed(self, what):
if what[0] != self.CHANGED_CLEAR:
if self.source and hasattr(self.source, "pixmap"):
if self.instance:
self.instance.setScale(1)
self.instance.setPixmap(self.source.pixmap)
|
[
"lutz.f.kroll@gmail.com"
] |
lutz.f.kroll@gmail.com
|
55cef7ba6f4bab4b32350b0bcbab3ce2d4c00d12
|
8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4
|
/development-support/wheel-smoke-test.py
|
fc47c188998f8298c69edac63841101e78c93158
|
[] |
no_license
|
strogo/pyobjc
|
ac4201c7742eb75348328eeecb7eedf4e3458de3
|
2579c5eaf44b0c5af77ee195c417d2c65e72dfda
|
refs/heads/master
| 2023-07-13T00:41:56.448005
| 2021-08-24T06:42:53
| 2021-08-24T06:42:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
"""
Helper script for performing smoke tests on an installed
copy of PyObjC
# NOTE: This file is not yet complete
"""
import platform
from distutils.version import LooseVersion
import objc # noqa: F401
from AddressBook import * # noqa: F401, F403
from AppleScriptKit import * # noqa: F401, F403
from AppleScriptObjC import * # noqa: F401, F403
from ApplicationServices import * # noqa: F401, F403
from Automator import * # noqa: F401, F403
from CFNetwork import * # noqa: F401, F403
from Cocoa import * # noqa: F401, F403
from CoreData import * # noqa: F401, F403
from CoreServices import * # noqa: F401, F403
from DiskArbitration import * # noqa: F401, F403
from ExceptionHandling import * # noqa: F401, F403
from GameController import * # noqa: F401, F403
from HIServices import * # noqa: F401, F403
from Quartz import * # noqa: F401, F403
sys_version = LooseVersion(platform.mac_ver()[0])
if sys_version >= LooseVersion("10.5"):
from CalendarStore import * # noqa: F401, F403
from Collaboration import * # noqa: F401, F403
from CoreText import * # noqa: F401, F403
from DictionaryServices import * # noqa: F401, F403
from FSEvents import * # noqa: F401, F403
if sys_version >= LooseVersion("10.6"):
from CoreLocation import * # noqa: F401, F403
from CoreWLAN import * # noqa: F401, F403
from iTunesLibrary import * # noqa: F401, F403
if sys_version >= LooseVersion("10.7"):
from AVFoundation import * # noqa: F401, F403
if sys_version >= LooseVersion("10.8"):
from Accounts import * # noqa: F401, F403
from EventKit import * # noqa: F401, F403
from GameCenter import * # noqa: F401, F403
if sys_version >= LooseVersion("10.9"):
from AVKit import * # noqa: F401, F403
if sys_version >= LooseVersion("10.10"):
from CloudKit import * # noqa: F401, F403
from CoreBluetooth import * # noqa: F401, F403
from CryptoTokenKit import * # noqa: F401, F403
from FinderSync import * # noqa: F401, F403
if sys_version >= LooseVersion("10.11"):
from Contacts import * # noqa: F401, F403
from ContactsUI import * # noqa: F401, F403
if sys_version >= LooseVersion("10.12"):
from Intents import * # noqa: F401, F403
from MediaPlayer import * # noqa: F401, F403
if sys_version >= LooseVersion("10.13"):
from BusinessChat import * # noqa: F401, F403
from ColorSync import * # noqa: F401, F403
from CoreML import * # noqa: F401, F403
from CoreSpotlight import * # noqa: F401, F403
from ExternalAccessory import * # noqa: F401, F403
from Vision import * # noqa: F401, F403
print("")
print("SMOKE TEST PASSED")
print("")
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
a59b69de96b87c4963e2a5082a415c273af284f3
|
2da8bcfb9a72e507812a8723e38ad6d030c300f1
|
/two_sum_1.py
|
57a56e2fc2bce51207bff8c27039bcaa0809aea2
|
[] |
no_license
|
aditya-doshatti/Leetcode
|
1a4e0f391a7d6ca2d7f8fdc35e535f4ec10fb634
|
eed20da07896db471ea6582785335e52d4f04f85
|
refs/heads/master
| 2023-04-06T02:18:57.287263
| 2023-03-17T03:08:42
| 2023-03-17T03:08:42
| 218,408,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
'''
1. Two Sum
Easy
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
https://leetcode.com/problems/two-sum/
'''
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if len(nums) <= 1:
return False
buff_dict = {}
for i in range(len(nums)):
if nums[i] in buff_dict:
return [buff_dict[nums[i]], i]
else:
buff_dict[target - nums[i]] = i
# for i in range(len(nums)):
# for j in range(i+1,len(nums)):
# if (nums[i] + nums[j]) == target:
# return [i,j]
|
[
"aditya.doshatti@sjsu.edu"
] |
aditya.doshatti@sjsu.edu
|
bf7fcf88de1f76cb2ade9e1d5ad78bb6c9d5350f
|
8ae0bf166da68488efec84fe79063f874687b332
|
/tests/ext/django/settings.py
|
47565381bb011dbafa23c965dd6905ecef88234f
|
[
"BSD-3-Clause"
] |
permissive
|
snopoke/slycache
|
75aafcd0fb9c9289a292c907f48159652e275f93
|
412e9e81a8c53f684f3fd7c5dafb6b06ecfbd0c0
|
refs/heads/main
| 2023-06-01T19:20:12.847651
| 2021-03-16T20:30:28
| 2021-03-16T20:30:28
| 340,279,785
| 0
| 0
|
NOASSERTION
| 2021-06-14T07:26:50
| 2021-02-19T06:34:25
|
Python
|
UTF-8
|
Python
| false
| false
| 494
|
py
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
INSTALLED_APPS = ['slycache.ext.django']
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'TIMEOUT': 60,
'LOCATION': 'location-1',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'TIMEOUT': 60,
'LOCATION': 'location-2',
},
}
SECRET_KEY = 'foobarbaz'
|
[
"skelly@dimagi.com"
] |
skelly@dimagi.com
|
5171d6acac1e78b70a50a813ff700ead3317d7d9
|
7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07
|
/mlflow/models/flavor_backend.py
|
894829a8cfcb69840c6c12785cdff07fc6a9cbb5
|
[
"Apache-2.0"
] |
permissive
|
mlflow/mlflow
|
ca97bfbbf32f8e59f454e428f5e46eb3d34d062f
|
37298ffafcd34002352d01d579d4524790544267
|
refs/heads/master
| 2023-09-01T13:15:53.902815
| 2023-09-01T09:00:42
| 2023-09-01T09:00:42
| 136,202,695
| 14,102
| 3,748
|
Apache-2.0
| 2023-09-14T21:52:42
| 2018-06-05T16:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,421
|
py
|
from abc import ABCMeta, abstractmethod
from mlflow.utils.annotations import developer_stable
@developer_stable
class FlavorBackend:
"""
Abstract class for Flavor Backend.
This class defines the API interface for local model deployment of MLflow model flavors.
"""
__metaclass__ = ABCMeta
def __init__(self, config, **kwargs): # pylint: disable=unused-argument
self._config = config
@abstractmethod
def predict(self, model_uri, input_path, output_path, content_type):
"""
Generate predictions using a saved MLflow model referenced by the given URI.
Input and output are read from and written to a file or stdin / stdout.
:param model_uri: URI pointing to the MLflow model to be used for scoring.
:param input_path: Path to the file with input data. If not specified, data is read from
stdin.
:param output_path: Path to the file with output predictions. If not specified, data is
written to stdout.
:param content_type: Specifies the input format. Can be one of {``json``, ``csv``}
"""
pass
@abstractmethod
def serve(
self,
model_uri,
port,
host,
timeout,
enable_mlserver,
synchronous=True,
stdout=None,
stderr=None,
):
"""
Serve the specified MLflow model locally.
:param model_uri: URI pointing to the MLflow model to be used for scoring.
:param port: Port to use for the model deployment.
:param host: Host to use for the model deployment. Defaults to ``localhost``.
:param timeout: Timeout in seconds to serve a request. Defaults to 60.
:param enable_mlserver: Whether to use MLServer or the local scoring server.
:param synchronous: If True, wait until server process exit and return 0, if process exit
with non-zero return code, raise exception.
If False, return the server process `Popen` instance immediately.
:param stdout: Redirect server stdout
:param stderr: Redirect server stderr
"""
pass
def prepare_env(self, model_uri, capture_output=False):
"""
Performs any preparation necessary to predict or serve the model, for example
downloading dependencies or initializing a conda environment. After preparation,
calling predict or serve should be fast.
"""
pass
@abstractmethod
def build_image(self, model_uri, image_name, install_mlflow, mlflow_home, enable_mlserver):
raise NotImplementedError
@abstractmethod
def generate_dockerfile(
self, model_uri, output_path, install_mlflow, mlflow_home, enable_mlserver
):
raise NotImplementedError
@abstractmethod
def can_score_model(self):
"""
Check whether this flavor backend can be deployed in the current environment.
:return: True if this flavor backend can be applied in the current environment.
"""
pass
def can_build_image(self):
"""
:return: True if this flavor has a `build_image` method defined for building a docker
container capable of serving the model, False otherwise.
"""
return callable(getattr(self.__class__, "build_image", None))
|
[
"noreply@github.com"
] |
mlflow.noreply@github.com
|
4a6527f8ab80096d974ecbc2592c03ee486098cf
|
6632896b4e320c932bdaa98b2caa16e057905333
|
/utils/io/labels/character.py
|
42500f3bb08e1952e8f60fcd45e82f1d64eb7841
|
[
"MIT"
] |
permissive
|
sky1170447398/tensorflow_end2end_speech_recognition
|
56229b5de62c8a4580a9d349afe6ccdf20d478fb
|
7ef52ae702db3852a7339136852bb14585e55b3b
|
refs/heads/master
| 2021-07-13T14:26:22.117311
| 2017-10-12T00:51:12
| 2017-10-12T00:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,507
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class Char2idx(object):
"""Convert from character to index.
Args:
map_file_path (string): path to the mapping file
"""
def __init__(self, str_char, map_file_path):
# Read the mapping file
self.map_dict = {}
with open(map_file_path, 'r') as f:
for line in f:
line = line.strip().split()
self.map_dict[line[0]] = int(line[1])
def __call__(self, str_char):
"""
Args:
str_char (string): a sequence of characters
Returns:
index_list (list): character indices
"""
char_list = list(str_char)
# Convert from character to index
index_list = list(map(lambda x: self.map_dict[x], char_list))
return np.array(index_list)
class Kana2idx(object):
"""Convert from kana character to index.
Args:
map_file_path (string): path to the mapping file
"""
def __init__(self, map_file_path):
# Read the mapping file
self.map_dict = {}
with open(map_file_path, 'r') as f:
for line in f:
line = line.strip().split()
self.map_dict[line[0]] = int(line[1])
def __call__(self, str_char):
"""
Args:
str_char (string): a sequence of kana characters
Returns:
index_list (list): kana character indices
"""
kana_list = list(str_char)
index_list = []
for i in range(len(kana_list)):
# Check whether next kana character is a double consonant
if i != len(kana_list) - 1:
if kana_list[i] + kana_list[i + 1] in self.map_dict.keys():
index_list.append(
int(self.map_dict[kana_list[i] + kana_list[i + 1]]))
i += 1
elif kana_list[i] in self.map_dict.keys():
index_list.append(int(self.map_dict[kana_list[i]]))
else:
raise ValueError(
'There are no kana character such as %s' % kana_list[i])
else:
if kana_list[i] in self.map_dict.keys():
index_list.append(int(self.map_dict[kana_list[i]]))
else:
raise ValueError(
'There are no kana character such as %s' % kana_list[i])
return np.array(index_list)
class Idx2char(object):
"""Convert from index to character.
Args:
map_file_path (string): path to the mapping file
capital_divide (bool, optional): set True when using capital-divided
character sequences
space_mark (string): the space mark to divide a sequence into words
"""
def __init__(self, map_file_path, capital_divide=False, space_mark=' '):
self.capital_divide = capital_divide
self.space_mark = space_mark
# Read the mapping file
self.map_dict = {}
with open(map_file_path, 'r') as f:
for line in f:
line = line.strip().split()
self.map_dict[int(line[1])] = line[0]
def __call__(self, index_list, padded_value=-1):
"""
Args:
index_list (np.ndarray): list of character indices.
Batch size 1 is expected.
padded_value (int): the value used for padding
Returns:
str_char (string): a sequence of characters
"""
# Remove padded values
assert type(index_list) == np.ndarray, 'index_list should be np.ndarray.'
index_list = np.delete(index_list, np.where(
index_list == padded_value), axis=0)
# Convert from indices to the corresponding characters
char_list = list(map(lambda x: self.map_dict[x], index_list))
if self.capital_divide:
char_list_tmp = []
for i in range(len(char_list)):
if i != 0 and 'A' <= char_list[i] <= 'Z':
char_list_tmp += [self.space_mark, char_list[i].lower()]
else:
char_list_tmp += [char_list[i].lower()]
str_char = ''.join(char_list_tmp)
else:
str_char = ''.join(char_list)
return str_char
# TODO: change to batch version
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
17e98671744e907b39b79ee00bceb8d098905ea7
|
c0792645c156cb9e20a1aa2b28c565150358bc6e
|
/apps/inmueble/migrations/0017_auto_20180526_0412.py
|
8ddac3048c867751bb7c7635c709ef1ddbd2cc4d
|
[] |
no_license
|
clioo/Praver
|
b22fd92886e0399845adb4366663cae6a7d7853b
|
523f0d78e0a2039a5bae3e539c93e2c2415a0840
|
refs/heads/master
| 2020-03-11T12:38:54.272392
| 2018-06-28T18:24:21
| 2018-06-28T18:24:21
| 130,003,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2018-05-26 10:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('inmueble', '0016_auto_20180526_0410'),
]
operations = [
migrations.AlterModelOptions(
name='localidades',
options={'managed': False},
),
]
|
[
"jesus_acosta1996@hotmail.com"
] |
jesus_acosta1996@hotmail.com
|
823e85e0dfe556e154f26652a500ed91838d9a13
|
8fa8ded3772dd7a124c1bbb91fc109ed2b63574b
|
/mycelium/apps/volunteers/tests/selenium_abstractions.py
|
5cda3d87e310f4732fb2b14444382ce6e7a8d3c9
|
[] |
no_license
|
skoczen/mycelium
|
3642b0f5e5ea03d609a3e499c7ad68092101dce0
|
da0f169163f4dc93e2dc2b0d934abf4f18c18af0
|
refs/heads/master
| 2020-04-10T09:21:46.893254
| 2014-05-20T02:27:06
| 2014-05-20T02:27:06
| 2,114,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,479
|
py
|
import time
from test_factory import Factory
class VolunteerTestAbstractions(object):
def create_new_volunteer(self):
sel = self.selenium
self.create_john_smith_and_verify()
sel.click("css=.detail_tab[href=#volunteer]")
time.sleep(1)
assert sel.is_text_present("No volunteer shifts yet")
def create_new_volunteer_with_one_shift(self):
sel = self.selenium
self.create_new_volunteer()
sel.click("css=.detail_tab[href=#volunteer]")
time.sleep(1)
assert sel.is_text_present("No volunteer shifts yet")
sel.click("css=tabbed_box[name=add_a_volunteer_shift] tab_title")
sel.type("css=#id_duration", 4)
sel.type("css=#id_date", "2/11/2011")
sel.click("css=tabbed_box[name=add_a_volunteer_shift] .add_shift_btn")
time.sleep(1)
self.assertEqual("4 hours", sel.get_text("css=.volunteer_shift_table:nth(0) .completed_volunteer_shift_row .duration"))
self.assertEqual("Feb. 11, 2011", sel.get_text("css=.volunteer_shift_table:nth(0) .completed_volunteer_shift_row .date"))
# self.assertEqual("on an unscheduled shift.", sel.get_text("css=.volunteer_shift_table:nth(0) .completed_volunteer_shift_row .shift"))
self.assertEqual("2011", sel.get_text("css=.year_overview:nth(0) .year"))
self.assertEqual("1 shift", sel.get_text("css=.year_overview:nth(0) .total_shifts"))
self.assertEqual("4 hours", sel.get_text("css=.year_overview:nth(0) .total_hours"))
sel.click("link=See details")
self.assertEqual("4 hours", sel.get_text("css=.year_of_shifts:nth(0) .year_of_volunteer_shifts_table .completed_volunteer_shift_row .duration"))
self.assertEqual("Feb. 11, 2011", sel.get_text("css=.year_of_shifts:nth(0) .year_of_volunteer_shifts_table .completed_volunteer_shift_row .date"))
# self.assertEqual("on an unscheduled shift.", sel.get_text("css=.year_of_shifts:nth(0) .year_of_volunteer_shifts_table .completed_volunteer_shift_row .shift"))
def add_a_new_shift(self, hours=None, date=None):
sel = self.selenium
if not hours:
hours = Factory.rand_int(1,10)
sel.click("css=tabbed_box[name=add_a_volunteer_shift] tab_title")
sel.type("css=#id_duration", hours)
if date:
sel.type("css=#id_date", date)
sel.click("css=tabbed_box[name=add_a_volunteer_shift] .add_shift_btn")
time.sleep(2)
|
[
"steven@quantumimagery.com"
] |
steven@quantumimagery.com
|
f19711c824d08b2f99bde202875be60d5015bc4a
|
dfc827bf144be6edf735a8b59b000d8216e4bb00
|
/CODE/experimentcode/DryBedPaper/Dambreak/FEVMdryWBuhonly zeroorder/Run.py
|
12accac582bfd8b9937706f72632e59e0bb7acdc
|
[] |
no_license
|
jordanpitt3141/ALL
|
c5f55e2642d4c18b63b4226ddf7c8ca492c8163c
|
3f35c9d8e422e9088fe096a267efda2031ba0123
|
refs/heads/master
| 2020-07-12T16:26:59.684440
| 2019-05-08T04:12:26
| 2019-05-08T04:12:26
| 94,275,573
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,430
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 14:24:26 2017
@author: jp
"""
from Serre2dc import *
from scipy import *
from pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog
import csv
import os
from numpy.linalg import norm,solve
from time import time
def copyarraytoC(a):
n = len(a)
b = mallocPy(n)
for i in range(n):
writetomem(b,i,a[i])
return b
def copyarrayfromC(a,n):
b = [0]*n
for i in range(n):
b[i] = readfrommem(a,i)
return b
def copywritearraytoC(a,b):
n = len(a)
for i in range(n):
writetomem(b,i,a[i])
def makevar(sx,ex,dx,st,et,dt):
x = arange(sx, ex, dx)
t = arange(st, et, dt)
return x,t
def getGfromupy(h,u,bed,u0,u1,h0,h1,b0,b1,dx):
idx = 1.0 / dx
ithree = 1.0 / 3.0
n = len(h)
G = zeros(n)
for i in range(1,n-1):
th = h[i]
thx = 0.5*idx*(h[i+1] - h[i-1])
tbx = 0.5*idx*(bed[i+1] - bed[i-1])
tbxx = idx*idx*(bed[i+1] -2*bed[i] + bed[i-1])
D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx
ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx
bi = D + 2.0*ithree*idx*idx*th*th*th
ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx
G[i] = ai*u[i-1] + bi*u[i] + ci*u[i+1]
#boundary
#i=0
i=0
th = h[i]
thx = 0.5*idx*(h[i+1] - h0)
tbx = 0.5*idx*(bed[i+1] - b0)
tbxx = idx*idx*(bed[i+1] -2*bed[i] + b0)
D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx
ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx
bi = D + 2.0*ithree*idx*idx*th*th*th
ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx
G[i] = ai*u0 + bi*u[i] + ci*u[i+1]
#i = n-1
i = n-1
th = h[i]
thx = 0.5*idx*(h1 - h[i-1])
tbx = 0.5*idx*(b1 - bed[i-1])
tbxx = idx*idx*(b1 -2*bed[i] + bed[i-1])
D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx
ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx
bi = D + 2.0*ithree*idx*idx*th*th*th
ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx
G[i] = ai*u[i-1] + bi*u[i] + ci*u1
return G
def Dambreak(h0,h1,x0,x):
n = len(x)
h = zeros(n)
u = zeros(n)
G = zeros(n)
b = zeros(n)
for i in range(n):
if (x[i] < x0):
h[i] = h1
else:
h[i] = h0
return h,u,G,b,h
def DrybedSWWANA(h1,x,t,g):
n = len(x)
u = zeros(n)
h = zeros(n)
G = zeros(n)
for i in range(n):
if(x[i] >= -t*sqrt(g*h1) and x[i] <= 2*t*sqrt(g*h1) ):
u[i] = 2.0 / 3.0 *(sqrt(g*h1) + x[i] / t)
h[i] = 4.0 / (9.0 * g) *(sqrt(g*h1) - 0.5*x[i] / t)**2
ux = 2.0 / 3.0 *(1.0 / t)
uxx = 0
hx = 2.0 / (9.0 * g * t*t) *(x[i] - 2*t*sqrt(g*h1))
G[i] = u[i]*h[i] - h[i]*h[i]*hx*ux
elif(x[i] < -t*sqrt(g*h1)):
h[i] = h1
return h,u, G
#Forcing Problem
wdir = "/home/jp/Documents/PhD/project/data/DryBedPaper/Dambreak/t1/"
if not os.path.exists(wdir):
os.makedirs(wdir)
g = 9.81
h1 = 1.0
h0 = 0.0
x0 = 0
startx = -100
sx = startx
endx = 100
ex = endx
startt = 0.0
st = startt
endt = 10
et = endt
dx = 0.01
l = 0.01
dt = l*dx
t = startt
x = arange(startx,endx +0.1*dx, dx)
xhuMbeg = array([x[0] - 1.5*dx, x[0] - dx, x[0] -0.5*dx])
xhuMend = array([x[-1] + 0.5*dx, x[-1] + dx, x[-1] + 1.5*dx])
xbMbeg = [x[0] - (2 + 0.5)*dx,x[0] - (2 + 1.0/6.0)*dx,x[0] - (2 - 1.0/6.0)*dx,x[0] - (2 - 0.5)*dx,x[0] - (1 + 1.0/6.0)*dx,x[0] - (1 - 1.0/6.0)*dx,x[0] - (1 - 0.5)*dx]
xbMend = [x[-1] + (1 - 0.5)*dx,x[-1] + (1 - 1.0/6.0)*dx,x[-1] + (1 + 1.0/6.0)*dx,x[-1] + (1 + 0.5)*dx,x[-1] + (2 - 1.0/6.0)*dx,x[-1] + (2 + 1.0/6.0)*dx,x[-1] + (2 + 0.5)*dx]
theta = 1.2
h,u,G,b,w = Dambreak(h0,h1,x0,x)
hMbeg,uMbeg,GMbeg,bta,wMbeg = Dambreak(h0,h1,x0,xhuMbeg)
hMend ,uMend ,GMend ,bta,wMend = Dambreak(h0,h1,x0,xhuMend)
hta,uta,Gta,bMbeg,wta = Dambreak(h0,h1,x0,xbMbeg)
hta,uta,Gta,bMend,wta = Dambreak(h0,h1,x0,xbMbeg)
n = len(x)
hnBC = 3
hnbc = 3*n + 2*hnBC
bnMBC = 7
bnBC = 4
bnbc = 3*n + 1 + 2*(bnBC -1)
unBC = 3
unbc = 2*n + 1 + 2*(unBC -1)
niBC = 4
xbegC = arange(sx - niBC*dx,sx,dx)
xendC = arange(ex + dx,ex + (niBC+1)*dx,dx)
b0C = b[0]*ones(niBC)
b1C = b[-1]*ones(niBC)
u0C = u[0]*ones(niBC)
u1C = u[-1]*ones(niBC)
h0C = h[0]*ones(niBC)
h1C = h[-1]*ones(niBC)
G0C = G[0]*ones(niBC)
G1C = G[-1]*ones(niBC)
xbcC = concatenate([xbegC,x,xendC])
bbcC = concatenate([b0C,b,b1C])
hbcC = concatenate([h0C,h,h1C])
ubcC = concatenate([u0C,u,u1C])
GbcC = concatenate([G0C,G,G1C])
xbcC_c = copyarraytoC(xbcC)
bbcC_c = copyarraytoC(bbcC)
hbcC_c = copyarraytoC(hbcC)
ubcC_c = copyarraytoC(ubcC)
GbcC_c = copyarraytoC(GbcC)
Eni = HankEnergyall(xbcC_c,hbcC_c,ubcC_c,bbcC_c,g,n + 2*niBC,niBC,dx)
Pni = uhall(xbcC_c,hbcC_c,ubcC_c,n + 2*niBC,niBC,dx)
Mni = hall(xbcC_c,hbcC_c,n + 2*niBC,niBC,dx)
Gni = Gall(xbcC_c,GbcC_c,n + 2*niBC,niBC,dx)
deallocPy(hbcC_c)
deallocPy(ubcC_c)
deallocPy(GbcC_c)
h_c = copyarraytoC(h)
G_c = copyarraytoC(G)
x_c = copyarraytoC(x)
b_c = copyarraytoC(b)
u_c = mallocPy(n)
hMbeg_c = copyarraytoC(hMbeg)
hMend_c = copyarraytoC(hMend)
wMbeg_c = copyarraytoC(wMbeg)
wMend_c = copyarraytoC(wMend)
bMbeg_c = copyarraytoC(bMbeg)
bMend_c = copyarraytoC(bMend)
GMbeg_c = copyarraytoC(GMbeg)
GMend_c = copyarraytoC(GMend)
uMbeg_c = copyarraytoC(uMbeg)
uMend_c = copyarraytoC(uMend)
ubc_c = mallocPy(unbc)
hbc_c = mallocPy(hnbc)
wbc_c = mallocPy(hnbc)
Gbc_c = mallocPy(hnbc)
bbc_c = mallocPy(bnbc)
t = 0.0
#Just an FEM solve here
while t < endt:
evolvewrapForcingANA(h_c,G_c,b_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,wMbeg_c,wMend_c,bMbeg_c,bMend_c,uMbeg_c,uMend_c,n,hnBC,hnbc,bnBC,bnMBC,bnbc,unBC,unbc,theta,dx,dt,g);
t = t + dt
print(t)
hSWWE,uSWWE,GSWWE = DrybedSWWANA(h1,x,t,g)
hC = copyarrayfromC(h_c,n)
GC = copyarrayfromC(G_c,n)
ReconandSolve(h_c,G_c,b_c,hMbeg_c,hMend_c,GMbeg_c,GMend_c,wMbeg_c,wMend_c,bMbeg_c,bMend_c,uMbeg_c,uMend_c,n,hnBC,hnbc,bnBC,bnMBC,bnbc,unBC,unbc,theta,dx,dt,g,Gbc_c,hbc_c,wbc_c,ubc_c,bbc_c)
ubcC = copyarrayfromC(ubc_c,unbc)
uC = ubcC[unBC:-unBC:2]
hbcC = copyarrayfromC(hbc_c,hnbc)
wbcC = copyarrayfromC(wbc_c,hnbc)
GbcC = copyarrayfromC(Gbc_c,hnbc)
bbcC = copyarrayfromC(bbc_c,bnbc)
u0Cn = uC[0]*ones(niBC)
u1Cn = uC[-1]*ones(niBC)
h0Cn = hC[0]*ones(niBC)
h1Cn = hC[-1]*ones(niBC)
G0Cn = GC[0]*ones(niBC)
G1Cn = GC[-1]*ones(niBC)
hbcC = concatenate([h0Cn,hC,h1Cn])
ubcC = concatenate([u0Cn,uC,u1Cn])
GbcC = concatenate([G0Cn,GC,G1Cn])
hbcC_c = copyarraytoC(hbcC)
ubcC_c = copyarraytoC(ubcC)
GbcC_c = copyarraytoC(GbcC)
En = HankEnergyall(xbcC_c,hbcC_c,ubcC_c,bbcC_c,g,n + 2*niBC,niBC,dx)
Pn = uhall(xbcC_c,hbcC_c,ubcC_c,n + 2*niBC,niBC,dx)
Mn = hall(xbcC_c,hbcC_c,n + 2*niBC,niBC,dx)
Gn = Gall(xbcC_c,GbcC_c,n + 2*niBC,niBC,dx)
Eerr = abs(En- Eni)/ abs(Eni)
Perr = abs(Pn- Pni)
Gerr = abs(Gn- Gni)
Merr = abs(Mn- Mni)/ abs(Mni)
deallocPy(hbcC_c)
deallocPy(ubcC_c)
deallocPy(GbcC_c)
deallocPy(h_c)
deallocPy(G_c)
deallocPy(u_c)
deallocPy(ubc_c)
deallocPy(hbc_c)
deallocPy(wbc_c)
deallocPy(Gbc_c)
deallocPy(bbc_c)
deallocPy(hMbeg_c)
deallocPy(GMbeg_c)
deallocPy(uMbeg_c)
deallocPy(hMend_c)
deallocPy(GMend_c)
deallocPy(uMend_c)
deallocPy(wMbeg_c)
deallocPy(wMend_c)
|
[
"jordanpitt3141@github.com"
] |
jordanpitt3141@github.com
|
3514ec92ed02abcbee5fee7ae8ee2db6b9582ad4
|
a087b6fbd9bc4f3ec1d7f48268e733e106369fcd
|
/food_project/recipe/ingredient.py
|
7e888e99153f0a4be95aaba923d7f84a712d1aff
|
[] |
no_license
|
zhakguder/FoodProject
|
8b628583fb2f0ee3537f5340301b78c926401968
|
7ab881d9884d366efffa7a4c84c27c02fbe7a467
|
refs/heads/main
| 2023-03-05T18:30:16.939582
| 2021-02-14T19:50:04
| 2021-02-14T19:50:04
| 327,974,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
#!/usr/bin/env python3
class Ingredient:
def __init__(self, name, id, quantity, entropy):
self.name = name
self.id = id # column number in recipe ingredients dataframe
self.quantity = quantity
self.entropy = entropy
# clusters = {}
ingredients_to_clusters = {} # TODO: put this into mongo
# TODO: separate populating this into an independent task, do it upfront once
# not with every run of the program
class IngredientCluster:
def __init__(self, name, *ingredients):
self.name = name
self.ingredients = ingredients
# self.quantity = 0
# clusters[name] = self
self.save_ingredients() # TODO: functions shouldn't have side-effects!!!
def save_ingredients(self):
# TODO: not written well
for ingredient in self.ingredients:
ingredients_to_clusters[ingredient.name] = self.name
def add_ingredient(self, ingredient):
self.ingredients += (ingredient,)
def get_quantity(self):
# self.quantity = sum([x.quantity for x in self.ingredients])
# return self.quantity
return sum([x.quantity for x in self.ingredients])
def get_entropy(self):
# self.entropy = sum([x.entropy for x in self.ingredients])
# return self.entropy
try:
n_ingredients = len([x for x in self.ingredients if x.entropy != 0])
return sum([x.entropy for x in self.ingredients]) / n_ingredients
except:
return 0
@staticmethod
def ingredient_in_cluster(ing_name):
# return [
# cluster.name for _, cluster in clusters if ing_name in cluster.ingredients
# ]
return ingredients_to_clusters.get(ing_name, None)
|
[
"zeynep.hakguder@huskers.unl.edu"
] |
zeynep.hakguder@huskers.unl.edu
|
3a390a443f62dda7aaf9ce5b667f5dfe9dd4c376
|
9b39e32f36e4f949d617e158c5034faa9595aaf0
|
/python/PHYS14/QCD_Pt-1800to2400_Tune4C_13TeV_pythia8_cff.py
|
e21dd683067ea39f4ddd04af1141721dd2490d0b
|
[] |
no_license
|
awhitbeck/SuSySubstructure
|
c24043227697f74bb85edc1cb84d65b884a141d5
|
1422a1f6e46468fdd103f92ccbdffc34468cbbd9
|
refs/heads/synch_June26_2015
| 2021-01-19T07:43:59.339373
| 2015-08-16T09:35:49
| 2015-08-16T09:35:49
| 14,053,341
| 0
| 2
| null | 2015-08-08T08:24:37
| 2013-11-01T20:44:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/Phys14DR/QCD_Pt-1800to2400_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_trkalmb_PHYS14_25_V1-v2/10000/7C6B4D11-AC7C-E411-8A5F-002590D0B0D8.root',
'/store/mc/Phys14DR/QCD_Pt-1800to2400_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_trkalmb_PHYS14_25_V1-v2/10000/C8C83950-C37C-E411-90F6-20CF305B0572.root',
'/store/mc/Phys14DR/QCD_Pt-1800to2400_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_trkalmb_PHYS14_25_V1-v2/10000/D89A3C72-C67C-E411-9BCF-00248C9BA537.root',
'/store/mc/Phys14DR/QCD_Pt-1800to2400_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_trkalmb_PHYS14_25_V1-v2/20000/085FCE99-877C-E411-900C-20CF3027A577.root',
'/store/mc/Phys14DR/QCD_Pt-1800to2400_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_trkalmb_PHYS14_25_V1-v2/20000/2A6589BB-A07C-E411-A137-00259074AE7A.root' ] );
secFiles.extend( [
] )
|
[
"whitbeck.andrew@gmail.com"
] |
whitbeck.andrew@gmail.com
|
80323aa0c33ac672897ee319b3f16f71e768fb5c
|
da47e42519b6d5eb37bdb634fd618672706e79da
|
/localizacion_metromed/tys_http/__manifest__.py
|
5094cf573f3be281a9e1d379e90f33313dc7ee0c
|
[] |
no_license
|
Tysamncaweb/produccion2
|
02bbbccefc4f4cd0d0948b1b0552d931f804fb9b
|
b95909d0689fc787185290565f0873040a6027cf
|
refs/heads/master
| 2022-04-26T13:51:22.316294
| 2020-04-29T19:58:35
| 2020-04-29T19:58:35
| 260,013,639
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
# -*- coding: utf-8 -*-
{
'name': "submodules/tys_http",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/views.xml',
'views/templates.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
|
[
"soporte.innova2129@gmail.com"
] |
soporte.innova2129@gmail.com
|
5ea6dac2e1e68ac62b2b84dc089bade38e2b1911
|
4e135c9d35a033c8f9f5c70e57ae27f61b4f34fb
|
/19_Sympact/Python/sympact.py
|
cc5b733f58860962724032b8ef0ef8b107763984
|
[] |
no_license
|
xpessoles/TP_Documents_PSI
|
a95b57eebd32a3641a02623e01cd3ab32f3155c2
|
76bd77fed5a88337e7669c8ca01944020de47458
|
refs/heads/master
| 2023-08-23T21:15:23.205881
| 2023-08-15T06:15:29
| 2023-08-15T06:15:29
| 168,961,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,417
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 21 07:27:14 2022
@author: xpess
"""
import numpy as np
import matplotlib.pyplot as plt
def trapeze(les_t:list,t1:float,t2:float,amax:float,vmax:float)->list :
"""
Détermination des loi horaires en trapèze
Parameters
----------
les_t : list
DESCRIPTION.
t1 : float
temps d'accélération.
t2 : float
temps d'accélération + vitesse constante
amax : float
DESCRIPTION.
vmax : float
DESCRIPTION.
Returns
-------
list
DESCRIPTION.
"""
les_pos = []
les_vit = []
les_acc = []
x1,v1 = 0,0
x2,v2 = 0,0
for t in les_t :
if t<t1 :
les_pos.append(0.5*amax*t*t)
les_vit.append(amax*t)
les_acc.append(amax)
x1,v1,t11 = les_pos[-1], les_vit[-1],t
elif t>=t1 and t<t2 :
les_pos.append(v1*(t-t11)+x1)
les_vit.append(v1)
les_acc.append(0)
x2,v2,t22 = les_pos[-1], les_vit[-1],t
elif t>=t2 and t<=t1+t2 :
les_pos.append(-0.5*amax*(t-t22)**2+v2*(t-t22)+x2)
les_vit.append(v2-amax*(t-t22))
les_acc.append(-amax)
else :
les_pos.append(les_pos[-1])
les_vit.append(0)
les_acc.append(0)
return np.array(les_pos),np.array(les_vit),np.array(les_acc)
def plot_pva(les_t,les_pos,les_vit,les_acc):
plt.plot(les_t,les_pos)
plt.plot(les_t,les_vit)
plt.plot(les_t,les_acc)
plt.grid()
plt.show()
def loi_ES(les_theta,R,H):
les_phi = np.arctan2(H+R*np.sin(les_theta),R*np.cos(les_theta))
return les_phi
def deriv(les_t,les_x):
les_v = []
for i in range(len(les_t)-1):
les_v.append((les_x[i+1]-les_x[i])/(les_t[i+1]-les_t[i]))
les_v.append(les_v[-1])
return np.array(les_v)
J1 = 188939e-9 # kgm2
J2 = 2233294973e-9 # kgm2
t1 = .1645 #
t2 = t1+0.671
theta_0 = -np.radians(30.61)
vmax = 4.93 # rad/s
amax = 30 # rad/s²
R,H = 0.081,0.112 # A modifier
les_t = np.linspace(0,t1+t2+t1,12000) # On rajoute +t1 pour avoir des points en plus...
les_pos,les_vit,les_acc = trapeze(les_t, t1, t2, amax, vmax)
# Calcul des positions
les_theta = les_pos + theta_0
les_phi = loi_ES(les_theta,R,H)
plt.plot(les_t,np.degrees(les_theta))
plt.plot(les_t,np.degrees(les_phi))
# Calcul des vitesses
les_thetap = deriv(les_t,les_theta)
les_phip = deriv(les_t,les_phi)
plt.figure()
plt.plot(les_t,les_thetap)
plt.plot(les_t,les_phip)
# Calcul des accélerations
les_thetapp = deriv(les_t,les_thetap)
les_phipp = deriv(les_t,les_phip)
plt.figure()
plt.plot(les_t,les_thetapp)
plt.plot(les_t,les_phipp)
# les_theta = les_pos
# les_phi = loi_ES(les_theta,R,H)
# les_thetap = deriv(les_t,les_theta)
# les_phip = deriv(les_t,les_phi)
# les_thetapp = deriv(les_t[:-1],les_thetap)
# les_phipp = deriv(les_t[:-1],les_phi)
# plt.plot(les_theta,label='Entrée')
# plt.plot(les_phi,label='Sortie')
# plt.legend()
Mu,M,g = 0.5,5,9.81
alpha =np.radians(45)
deb,fin = 1,1000
Cm = (J2*les_thetap*les_thetapp+J1*les_phip*les_phipp+Mu*M*g*np.cos(les_phi-alpha)*les_thetap)/les_thetap
plt.plot(les_t,Mu*M*g*np.cos(les_phi-alpha))
#Cm = (J2*les_thetap[deb:fin]*les_thetapp[deb:fin]+J1*les_phip[deb:fin]*les_phipp[deb:fin])/les_thetap[deb:fin]
plt.figure()
plt.plot(les_t,Cm)
# # ENTREE : THETA
# # SORTIE : PHI
|
[
"xpessoles.ptsi@free.fr"
] |
xpessoles.ptsi@free.fr
|
b34cc4a6c6bb510a6cb526348b14851f8cf7b341
|
27c9b374a75550252ddfe5da400fad891c6de590
|
/chars/monster_scripts/MonsterAudio.py
|
d92db631a6f408a33e87967e694889278c5eabfa
|
[] |
no_license
|
Dynamique-Zak/Zelda_BlenderGame
|
03065416939deb3ce18007909ccc278c736baad0
|
0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21
|
refs/heads/master
| 2016-08-13T00:12:34.746520
| 2016-02-19T23:18:27
| 2016-02-19T23:18:27
| 49,572,402
| 30
| 16
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
import aud
# load sound device
device = aud.device()
class MonsterAudio:
def __init__(self):
self.lastStepFrame = 0
def playStepSound(self, current_frame, frames, audio):
for frame in frames:
r = range(frame-1, frame+1)
if ( (current_frame >= frame and current_frame <= frame+1) and self.lastStepFrame != frame):
self.lastStepFrame = frame
device.play(audio)
|
[
"schartier.isaac@gmail.com"
] |
schartier.isaac@gmail.com
|
b4114f55a3b44215c32b9f099140ad31264d5e11
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/simple-cipher/85118e8764e3424d97f4175fe0cad1fd.py
|
28007cdd0fefc6a8857a63ff19f4b2b3418fd3e8
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
# cgi path
# William Morris
# exercism.io
# cipher.py
import random
class Caesar:
def __init__(self):
self.key = 'd'
def encode(self,phrase):
encoded_phrase = ''
for letter in phrase:
if letter.isalpha():
encoded_phrase += _shift(letter.lower(),self.key,1)
return encoded_phrase
def decode(self,phrase):
decoded_phrase = ''
for letter in phrase:
if letter.isalpha():
decoded_phrase += _shift(letter.lower(),self.key,-1)
return decoded_phrase
class Cipher:
def __init__(self, key = None):
if key:
self.key = key
else:
self.key = ''.join([chr(random.randint(97,122)) for i in range(100)])
def encode(self,phrase):
keylist = list(self.key)
phrase = list(phrase)
while len(keylist) < len(phrase):
keylist += keylist
encoded_phrase = ''
for letter,key in zip(phrase,keylist):
if letter.isalpha():
encoded_phrase +=_shift(letter,key,1)
return encoded_phrase
def decode(self,phrase):
keylist = list(self.key)
while len(keylist) < len(phrase):
keylist += keylist
decoded_phrase = ''
for letter,key in zip(phrase,keylist):
if letter.isalpha():
decoded_phrase +=_shift(letter,key,-1)
return decoded_phrase
def _shift(letter,key,sign):
'''letter and key must be lower case, sign must be 1 (for encode)
or -1 (for decode)'''
shift = ord(key)-97
letter_ascii = ord(letter)
letter_ascii += sign*shift
while letter_ascii < 97 or letter_ascii > 122:
letter_ascii -= sign*26
return chr(letter_ascii)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
2d6be584dbe664f1c96da1731febcec3c8fc88fb
|
2481cde6506743565dff2b405a2396daf208ab3e
|
/src/notification/migrations/0026_notificationmessage_sender.py
|
567ce2e804646575b24a3ee33b7668cbd7a59bdb
|
[
"Apache-2.0"
] |
permissive
|
aropan/clist
|
4819a3036d179595e4df8c646aff2ed593b9dad3
|
5c805b2af71acee97f993f19d8d4e229f7f5b411
|
refs/heads/master
| 2023-08-31T11:15:17.987776
| 2023-08-27T21:51:14
| 2023-08-27T21:52:16
| 187,111,853
| 276
| 35
|
Apache-2.0
| 2023-09-06T18:42:53
| 2019-05-16T22:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
# Generated by Django 3.1.14 on 2022-01-17 00:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('true_coders', '0048_auto_20220111_2315'),
('notification', '0025_auto_20220111_2315'),
]
operations = [
migrations.AddField(
model_name='notificationmessage',
name='sender',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sender_set', to='true_coders.coder'),
),
]
|
[
"nap0rbl4@gmail.com"
] |
nap0rbl4@gmail.com
|
b8aafb5b60e806817d8bb6084aae738c01bfca0b
|
22b1ca0d3e93c10356a95aa4377a798e3615bcc3
|
/djoauth/djoauth/urls.py
|
7566acb7c068796cad8ba9fafab4d105db12ece4
|
[] |
no_license
|
xtornasol512/djoauth
|
b99142640356d454a738a5047a315449d3a5315a
|
dede596ecc0fee3010f331b01975b5cb51c122dd
|
refs/heads/master
| 2020-04-09T14:43:15.017259
| 2018-12-04T20:37:31
| 2018-12-04T20:37:31
| 160,405,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
"""djoauth URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from home.views import simple_view, terms
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('allauth.urls')),
url(r'^$', simple_view, name="simple_view"),
url(r'^terms$', terms, name="terms"),
]
|
[
"xtornasol512@gmail.com"
] |
xtornasol512@gmail.com
|
3afd9866a37000da97a5bae4e35cf4934ba1a2ad
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/qichamao_cmpInfo_20210203092107.py
|
f91c112ba48f192dce00f481f0f2d683966be121
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725
| 2021-02-04T10:11:16
| 2021-02-04T10:11:16
| 332,619,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,230
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : qichamao_cmpInfo.py
@Time : 2021/02/03 09:17:24
@Author : Jiajun Chen
@Version : 1.0
@Contact : 554001000@qq.com
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
'''
import requests
from bs4 import BeautifulSoup
import time
import csv
import pandas as pd
import numpy as np
# login = {'user':'13710149700',
# 'password':'123456'}
# 使用的网站是企查查
base_url = 'https://www.qichamao.com'
# requests.post('https://www.qichamao.com',data=login,headers=afterLogin_headers)
# 需要在浏览器上登录企查猫账户,并将cookie文件添加在此
afterLogin_headers = {'Cookie':'qznewsite.uid=y4eseo3a1q4xbrwimor3o5tm; qz.newsite=6C61702DD95709F9EE190BD7CCB7B62C97136BAC307B6F0B818EC0A943307DAB61627F0AC6CD818268C10D121B37F840C1EF255513480EC3012A7707443FE523DD7FF79A7F3058E5E7FB5CF3FE3544235D5313C4816B54C0CDB254F24D8ED5235B722BCBB23BE62B19A2370E7F0951CD92A731FE66C208D1BE78AA64758629806772055F7210C67D442DE7ABBE138EF387E6258291F8FBF85DFF6C785E362E2903705A0963369284E8652A61531293304D67EBB8D28775FBC7D7EBF16AC3CCA96F5A5D17; Hm_lvt_55ad112b0079dd9ab00429af7113d5e3=1611805092,1612262918; Hm_lpvt_55ad112b0079dd9ab00429af7113d5e3=1612262927',
'Referer':'https://www.qichamao.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
def get_compInfo(comp):
# 输入公司名称,返回公司基本公司信息
r = requests.get('{}/search/all/{}'.format(base_url,comp),headers=afterLogin_headers)
r.raise_for_status()
r.encoding = 'utf-8' #linux utf-8
soup = BeautifulSoup(r.text,features="html.parser")
url = base_url + soup.find(attrs={'class':'listsec_con'}).a['href']
# soup.find(attrs={'class':'listsec_con'})
time.sleep(5)
rs = requests.get(url,headers=afterLogin_headers)
rs.encoding='utf-8'
soup2 = BeautifulSoup(rs.text,'html.parser')
info = soup2.find(attrs={'class':'qd-table-body li-half f14'}).findAll('div')
info = [i.get_text().strip() for i in info]
compinfo = {'法定代表人':info[0],
'纳税人识别号':info[1],
'名称':info[2],
'机构代码':info[3],
'注册号':info[4],
'注册资本':info[5],
'统一社会信用代码':info[6],
'登记机关':info[7],
'经营状态':info[8],
'成立日期':info[9],
'企业类型':info[10],
'经营期限':info[11],
'所属地区':info[12],
'核准时间':info[13],
'企业地址':info[14],
'经营范围':info[15]}
return compinfo
if __name__ == '__main__':
import pickle
with open('./saved_config/zb_zxb_stocksInfo.pkl', 'rb') as file:
all_data = pickle.load(file)
j =0
for i, (k, v) in enumerate(all_data.items()):
if v['统一社会信用代码'] == '':
try:
compinfo = get_compInfo(v['机构名称'])
print('成功获得 ',compinfo['名称'])
v['统一社会信用代码'] = compinfo['统一社会信用代码']
v['经营范围'] = compinfo['经营范围']
except:
print("需要验证码更新")
wait = input("Press Enter to continue.")
compinfo = get_compInfo(v['机构名称'])
v['统一社会信用代码'] = compinfo['统一社会信用代码']
v['经营范围'] = compinfo['经营范围']
else:
j+=1
time.sleep(3)
else:
continue
if j > 60:
break
with open('./saved_config/zb_zxb_stocksInfo.pkl', 'wb') as file:
pickle.dump(all_data,file, pickle.HIGHEST_PROTOCOL)
# your stuff
# df = pd.read_excel('C:/Users/chen/Desktop/IPO_info/P020210122657813200711.xls',skipfooter=1,skiprows=2,index_col='序号',keep_default_na=False,encoding='utf-8',sheet_name=0)
# comp1 = df[' 企业名称'].values
# df2 = pd.read_excel('C:/Users/chen/Desktop/IPO_info/P020210122657813200711.xls',skipfooter=1,skiprows=2,index_col='序号',keep_default_na=False,encoding='utf-8',sheet_name=1)
# comp2 = df2[' 企业名称'].values
# compList =np.append(comp1,comp2)
# # for i in compList:
# # compinfo = get_compInfo(i)
# # csv_columns = ['法定代表人','纳税人识别号','名称','机构代码','注册号','注册资本','统一社会信用代码','登记机关',\
# # '经营状态','成立日期','企业类型','经营期限','所属地区','核准时间','企业地址','经营范围']
# # csv_file = "credit.csv"
# # try:
# # with open(csv_file, 'a+') as csvfile:
# # writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
# # writer.writeheader()
# # writer.writerow(compinfo)
# # except IOError:
# # print("I/O error")
# try:
# with open('C:/Users/chen/Desktop/IPO_info/csrc_dict.pkl', 'rb') as file:
# csrc_dict = pickle.load(file)
# except:
# csrc_dict = {}
# count = 0
# for i in compList:
# count +=1
# i = i.replace(r'*','')
# if i in data:
# if i in csrc_dict and i['统一社会信用代码'] != '':
# continue
# try:
# compinfo = get_compInfo(i)
# data[i]['统一社会信用代码'] = compinfo['统一社会信用代码']
# data[i]['经营范围'] = compinfo['经营范围']
# csrc_dict.update(data[i])
# except:
# print('cannot use anymore')
# else:
# print('cannot found value: ',i)
# if count % 20 == 0:
# time.sleep(60)
# with open('C:/Users/chen/Desktop/IPO_info/csrc.pkl', 'rb') as file:
# pickle.dump(csrc_dict, file, pickle.HIGHEST_PROTOCOL)
|
[
"chenjiajun.jason@outlook.com"
] |
chenjiajun.jason@outlook.com
|
b4ccb64fb9f5a2c0c23bf3b386d43e9fbc4568bd
|
01a085bb89225d0390316036a915b2b8d7403219
|
/bin/dynamodb_dump
|
8550b8bc85167f6177c7a589e20cdfa1b46810ab
|
[] |
no_license
|
vipinsachdeva/elasticluster_full
|
0199ee00e716f285173c8974fdf9570ab5d43470
|
71160196682a8d18a9547d5d28e8a885b067924d
|
refs/heads/master
| 2021-05-15T11:35:44.706476
| 2017-10-25T22:37:37
| 2017-10-25T22:37:37
| 108,333,786
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,153
|
#!/home/vsachde/elasticluster/bin/python
import argparse
import errno
import os
import boto
from boto.compat import json
from boto.compat import six
DESCRIPTION = """Dump the contents of one or more DynamoDB tables to the local filesystem.
Each table is dumped into two files:
- {table_name}.metadata stores the table's name, schema and provisioned
throughput.
- {table_name}.data stores the table's actual contents.
Both files are created in the current directory. To write them somewhere else,
use the --out-dir parameter (the target directory will be created if needed).
"""
def dump_table(table, out_dir):
metadata_file = os.path.join(out_dir, "%s.metadata" % table.name)
data_file = os.path.join(out_dir, "%s.data" % table.name)
with open(metadata_file, "w") as metadata_fd:
json.dump(
{
"name": table.name,
"schema": table.schema.dict,
"read_units": table.read_units,
"write_units": table.write_units,
},
metadata_fd
)
with open(data_file, "w") as data_fd:
for item in table.scan():
# JSON can't serialize sets -- convert those to lists.
data = {}
for k, v in six.iteritems(item):
if isinstance(v, (set, frozenset)):
data[k] = list(v)
else:
data[k] = v
data_fd.write(json.dumps(data))
data_fd.write("\n")
def dynamodb_dump(tables, out_dir):
try:
os.makedirs(out_dir)
except OSError as e:
# We don't care if the dir already exists.
if e.errno != errno.EEXIST:
raise
conn = boto.connect_dynamodb()
for t in tables:
dump_table(conn.get_table(t), out_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="dynamodb_dump",
description=DESCRIPTION
)
parser.add_argument("--out-dir", default=".")
parser.add_argument("tables", metavar="TABLES", nargs="+")
namespace = parser.parse_args()
dynamodb_dump(namespace.tables, namespace.out_dir)
|
[
"vipin@kryptonite"
] |
vipin@kryptonite
|
|
823128232096e5a64b6c166ca62a3b471935bf31
|
4d01bd8003ac64b2a688db12108b472387c999c4
|
/Q_Q.py
|
a27e43ccf178755bb31aa68761a4fffd6c588111
|
[] |
no_license
|
BlackHat-S/PWN
|
2ef6aa7db3baafe009ac31631bdaffaf043f3b85
|
cde1f9ee3258aa56c2634995d5baec14b500a399
|
refs/heads/master
| 2021-01-07T00:44:20.208899
| 2020-03-21T04:16:14
| 2020-03-21T04:29:47
| 241,529,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from pwn import *
#p=process('./Q_Q')
p=remote('121.40.92.129',28022)
#gdb.attach(p)
s2='you\x11need"pwntools!3'
#s2=p32(0x11756F79)
#s2=p32(0x6e11756f)
p.sendline(s2)
payload='a'*19+p32(0x8181B1B)
p.sendline(payload)
p.interactive()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
39763dada35168638ae5e1e8b7ec0faa6847b5ba
|
0775a2175ddc9f41b4f7637a388623ca9ef15259
|
/chap8/16.子数组的最大累加和问题/16_maxSubArray.py
|
fb63c569d4ab173bbb1254567316ee40c6b6c431
|
[] |
no_license
|
huang-jingwei/Coding-Interview-Guide
|
6839876457b3cf01a08c5623463e66fe9efa7416
|
a42f45213c94d529f69a61f0bda92eddfe5bdfea
|
refs/heads/master
| 2023-04-16T20:05:40.615475
| 2021-05-05T03:32:25
| 2021-05-05T03:32:25
| 286,343,194
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
if len(nums) == 1: # 只有一个元素
return nums[0]
maxSubArraySum = [0] * len(nums)
for index in range(len(maxSubArraySum)):
if index == 0:
maxSubArraySum[index] = nums[index]
elif index > 0:
if maxSubArraySum[index - 1] >= 0:
maxSubArraySum[index] = maxSubArraySum[index - 1] + nums[index]
else:
maxSubArraySum[index] = nums[index]
return max(maxSubArraySum)
|
[
"2194454302@qq.com"
] |
2194454302@qq.com
|
1801f478f73579ae23ce3d61b2ed1d64b5c7e40f
|
1cc8604dff9713d3879599f1876a6ea313ebe1fb
|
/pysc2/lib/stopwatch.py
|
6c4202c859716afb77ef1155c82b1955ffcfd8d1
|
[
"Apache-2.0"
] |
permissive
|
SoyGema/pysc2
|
c363ec768ebf94e7b0fa08e136b36b7432ae1b44
|
e5de62023ec45ac212016b5404dd73272109d9d4
|
refs/heads/master
| 2022-02-08T21:41:46.530129
| 2022-01-29T13:11:15
| 2022-01-29T13:11:15
| 143,897,552
| 1
| 0
|
Apache-2.0
| 2018-08-07T16:06:23
| 2018-08-07T16:06:22
| null |
UTF-8
|
Python
| false
| false
| 7,935
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A stopwatch to check how much time is used by bits of code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import os
import sys
import threading
import time
from future.builtins import range # pylint: disable=redefined-builtin
import six
class Stat(object):
"""A set of statistics about a single value series."""
__slots__ = ("num", "min", "max", "sum", "sum_sq")
def __init__(self):
self.reset()
def reset(self):
self.num = 0
self.min = 1000000000
self.max = 0
self.sum = 0
self.sum_sq = 0
def add(self, val):
self.num += 1
if self.min > val:
self.min = val
if self.max < val:
self.max = val
self.sum += val
self.sum_sq += val**2
@property
def avg(self):
return 0 if self.num == 0 else self.sum / self.num
@property
def dev(self):
"""Standard deviation."""
if self.num == 0:
return 0
return math.sqrt(max(0, self.sum_sq / self.num - (self.sum / self.num)**2))
def merge(self, other):
self.num += other.num
self.min = min(self.min, other.min)
self.max = max(self.max, other.max)
self.sum += other.sum
self.sum_sq += other.sum_sq
@staticmethod
def build(summation, average, standard_deviation, minimum, maximum, number):
stat = Stat()
if number > 0:
stat.num = number
stat.min = minimum
stat.max = maximum
stat.sum = summation
stat.sum_sq = number * (standard_deviation**2 + average**2)
return stat
@staticmethod
def parse(s):
if s == "num=0":
return Stat()
parts = (float(p.split(":")[1]) for p in s.split(", "))
return Stat.build(*parts)
def __str__(self):
if self.num == 0:
return "num=0"
return "sum: %.4f, avg: %.4f, dev: %.4f, min: %.4f, max: %.4f, num: %d" % (
self.sum, self.avg, self.dev, self.min, self.max, self.num)
class StopWatchContext(object):
"""Time an individual call."""
__slots__ = ("_sw", "_start")
def __init__(self, stopwatch, name):
self._sw = stopwatch
self._sw.push(name)
def __enter__(self):
self._start = time.time()
def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
self._sw.add(self._sw.pop(), time.time() - self._start)
class TracingStopWatchContext(StopWatchContext):
"""Time an individual call, but also output all the enter/exit calls."""
def __enter__(self):
super(TracingStopWatchContext, self).__enter__()
self._log(">>> %s" % self._sw.cur_stack())
def __exit__(self, *args, **kwargs):
self._log("<<< %s: %.6f secs" % (self._sw.cur_stack(),
time.time() - self._start))
super(TracingStopWatchContext, self).__exit__(*args, **kwargs)
def _log(self, s):
print(s, file=sys.stderr)
class FakeStopWatchContext(object):
"""A fake stopwatch context for when the stopwatch is too slow or unneeded."""
__slots__ = ()
def __enter__(self):
pass
def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
pass
fake_context = FakeStopWatchContext()
class StopWatch(object):
"""A context manager that tracks call count and latency, and other stats.
Usage:
sw = stopwatch.Stopwatch()
with sw("foo"):
foo()
with sw("bar"):
bar()
@sw.decorate
def func():
pass
func()
print(sw)
"""
__slots__ = ("_times", "_local", "_factory")
def __init__(self, enabled=True, trace=False):
self._times = collections.defaultdict(Stat)
self._local = threading.local()
if trace:
self.trace()
elif enabled:
self.enable()
else:
self.disable()
def disable(self):
self._factory = lambda _: fake_context
def enable(self):
self._factory = lambda name: StopWatchContext(self, name)
def trace(self):
self._factory = lambda name: TracingStopWatchContext(self, name)
def custom(self, factory):
self._factory = factory
def __call__(self, name):
return self._factory(name)
def decorate(self, name_or_func):
"""Decorate a function/method to check its timings.
To use the function's name:
@sw.decorate
def func():
pass
To name it explicitly:
@sw.decorate("name")
def random_func_name():
pass
Args:
name_or_func: the name or the function to decorate.
Returns:
If a name is passed, returns this as a decorator, otherwise returns the
decorated function.
"""
if os.environ.get("SC2_NO_STOPWATCH"):
return name_or_func if callable(name_or_func) else lambda func: func
def decorator(name, func):
@functools.wraps(func)
def _stopwatch(*args, **kwargs):
with self(name):
return func(*args, **kwargs)
return _stopwatch
if callable(name_or_func):
return decorator(name_or_func.__name__, name_or_func)
else:
return lambda func: decorator(name_or_func, func)
def push(self, name):
try:
self._local.stack.append(name)
except AttributeError:
# Using an exception is faster than using hasattr.
self._local.stack = [name]
def pop(self):
stack = self._local.stack
ret = ".".join(stack)
stack.pop()
return ret
def cur_stack(self):
return ".".join(self._local.stack)
def clear(self):
self._times.clear()
def add(self, name, duration):
self._times[name].add(duration)
def __getitem__(self, name):
return self._times[name]
@property
def times(self):
return self._times
def merge(self, other):
for k, v in six.iteritems(other.times):
self._times[k].merge(v)
@staticmethod
def parse(s):
"""Parse the output below to create a new StopWatch."""
stopwatch = StopWatch()
for line in s.splitlines():
if line.strip():
parts = line.split(None)
name = parts[0]
if name != "%": # ie not the header line
rest = (float(v) for v in parts[2:])
stopwatch.times[parts[0]].merge(Stat.build(*rest))
return stopwatch
def str(self, threshold=0.1):
"""Return a string representation of the timings."""
if not self._times:
return ""
total = sum(s.sum for k, s in six.iteritems(self._times) if "." not in k)
table = [["", "% total", "sum", "avg", "dev", "min", "max", "num"]]
for k, v in sorted(self._times.items()):
percent = 100 * v.sum / (total or 1)
if percent > threshold: # ignore anything below the threshold
table.append([
k,
"%.2f%%" % percent,
"%.4f" % v.sum,
"%.4f" % v.avg,
"%.4f" % v.dev,
"%.4f" % v.min,
"%.4f" % v.max,
"%d" % v.num,
])
col_widths = [max(len(row[i]) for row in table)
for i in range(len(table[0]))]
out = ""
for row in table:
out += " " + row[0].ljust(col_widths[0]) + " "
out += " ".join(
val.rjust(width) for val, width in zip(row[1:], col_widths[1:]))
out += "\n"
return out
def __str__(self):
return self.str()
# Global stopwatch is disabled by default to not incur the performance hit if
# it's not wanted.
sw = StopWatch(enabled=False)
|
[
"tewalds@google.com"
] |
tewalds@google.com
|
8ea422ab7d7637a015b6fe31a0e25db3f3cae371
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_75/561.py
|
36d9557ad1663cf9120d0f6c51352667ae7b8581
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import sys
import re
input = sys.stdin
T=int(input.readline())
for i in xrange(1,T+1):
data = input.readline()
data = data.split()
C = int(data[0])
cs = data[1:1+C]
CS = {}
for s in cs:
CS["%s%s" % (s[0], s[1])] = s[2]
CS["%s%s" % (s[1], s[0])] = s[2]
D = int(data[1+C])
ds = data[1+C+1:1+C+1+D]
DS = {}
for s in ds:
DS[s[0]] = s[1]
DS[s[1]] = s[0]
N = int(data[1+C+1+D])
ns = data[1+C+1+D+1:]
S = ns[0] # one string
res = []
opposite = None
pair = None
for s in S:
if len(res):
k1 = "%s%s" % (res[-1], s)
if k1 in CS:
res[-1] = CS[k1]
continue
if s in DS:
if DS[s] in res:
# res = res[0:res.index(DS[s])]
res = []
continue
res.append(s)
# print data
# print C, cs, CS
# print D, ds, DS
# print N, ns
print re.sub("'", '', "Case #%s: %s" % (i, res))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
01a2e6b7af0a367e62438e4d324ed24336970776
|
9c1b28d9564cfde450f6590031ede667650d494f
|
/player/tasks.py
|
ade314e4fae37740ac46ba2df18ee15c5805a719
|
[] |
no_license
|
gdmurray/rankedgg-backend
|
90f887f4d64ca5a38741ac8e9482f08fc5d6fd7e
|
ac312750a09a70907fe3e5a9ae9e1172c5e341d8
|
refs/heads/master
| 2022-12-18T16:54:27.302293
| 2019-03-24T05:24:23
| 2019-03-24T05:24:23
| 177,320,244
| 0
| 0
| null | 2022-12-08T04:54:05
| 2019-03-23T17:36:14
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
from ranked.celery import app
from .models import Player
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
@app.task
def update_ranked_data(pk, region=None):
player = Player.objects.get(id=pk)
print(f"Updating Ranked Data for {player.username}")
metadata, updated_player = player.fetch_metadata(include_player=True)
from .serializers import PlayerLeaderBoardSerializer
serializer = PlayerLeaderBoardSerializer(updated_player, many=False,
context={"region": region, "from_task": True}).data
layer = get_channel_layer()
async_to_sync(layer.group_send)('updates', {
'type': 'player_updates',
'content': serializer
})
|
[
"gd-murray@hotmail.com"
] |
gd-murray@hotmail.com
|
62cb22669072ff1e224be98f36817be8899fa801
|
b3ba7762ae4e209a02bf3a47dc40ceff421087d2
|
/evm/chains/tester/__init__.py
|
fd1cdb4b31fe590a3b5ecfd0a0e1dfd75159d7ee
|
[] |
no_license
|
nicksavers/py-evm
|
3c740ae328c5dd0f10c967660a4e452d257fdfa3
|
70631e3b726d093749a7315b8a4c6b1ebc4d7322
|
refs/heads/master
| 2021-08-15T21:35:19.556248
| 2017-11-17T21:20:24
| 2017-11-17T21:20:24
| 111,194,384
| 3
| 0
| null | 2017-11-18T09:56:00
| 2017-11-18T09:55:59
| null |
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
from cytoolz import (
assoc,
)
from eth_utils import (
reversed_return,
)
from evm.chains.chain import Chain
from evm.vm.forks import (
FrontierVM as BaseFrontierVM,
HomesteadVM as BaseHomesteadVM,
EIP150VM as BaseEIP150VM,
SpuriousDragonVM as BaseSpuriousDragonVM,
)
from evm.utils.chain import (
generate_vms_by_range,
)
class MaintainGasLimitMixin(object):
@classmethod
def create_header_from_parent(cls, parent_header, **header_params):
"""
Call the parent class method maintaining the same gas_limit as the
previous block.
"""
return super(MaintainGasLimitMixin, cls).create_header_from_parent(
parent_header,
**assoc(header_params, 'gas_limit', parent_header.gas_limit)
)
class FrontierTesterVM(MaintainGasLimitMixin, BaseFrontierVM):
pass
class BaseHomesteadTesterVM(MaintainGasLimitMixin, BaseHomesteadVM):
pass
class EIP150TesterVM(MaintainGasLimitMixin, BaseEIP150VM):
pass
class SpuriousDragonTesterVM(MaintainGasLimitMixin, BaseSpuriousDragonVM):
pass
# Error template for fork-ordering validation.
# NOTE(review): not referenced anywhere in this module; presumably consumed
# by importers -- confirm before removing.
INVALID_FORK_ACTIVATION_MSG = (
    "The {0}-fork activation block may not be null if the {1}-fork block "
    "is non null"
)
@reversed_return
def _generate_vm_configuration(homestead_start_block=None,
                               dao_start_block=None,
                               eip150_start_block=None,
                               spurious_dragon_block=None):
    """Yield ``(activation_block, vm_class)`` pairs for a tester chain.

    Pairs are yielded newest-fork-first; ``@reversed_return`` (per its
    name -- TODO confirm against eth_utils) reverses the sequence so the
    caller receives them oldest-fork-first as ``Chain.configure`` expects.

    Each ``*_block`` argument is that fork's activation block number, or
    ``None`` when the fork is not explicitly configured.
    ``dao_start_block`` may additionally be ``False`` to disable DAO-fork
    support entirely.
    """
    # If no explicit configuration has been passed, configure the vm to start
    # with the latest fork rules at block 0
    no_declared_blocks = (
        spurious_dragon_block is None and
        eip150_start_block is None and
        homestead_start_block is None
    )
    if no_declared_blocks:
        yield (0, SpuriousDragonTesterVM)
    if spurious_dragon_block is not None:
        yield (spurious_dragon_block, SpuriousDragonTesterVM)
        # Spurious Dragon starts later and no earlier fork was requested:
        # fall back to EIP150 rules from genesis.
        remaining_blocks_not_declared = (
            homestead_start_block is None and
            eip150_start_block is None
        )
        if spurious_dragon_block > 0 and remaining_blocks_not_declared:
            yield (0, EIP150TesterVM)
    if eip150_start_block is not None:
        yield (eip150_start_block, EIP150TesterVM)
        # If the EIP150 rules do not start at block 0 and homestead_start_block has not
        # been configured for a specific block, configure homestead_start_block to start at
        # block 0.
        if eip150_start_block > 0 and homestead_start_block is None:
            HomesteadTesterVM = BaseHomesteadTesterVM.configure(
                dao_fork_block_number=0,
            )
            yield (0, HomesteadTesterVM)
    if homestead_start_block is not None:
        if dao_start_block is False:
            # If dao_start_block support has explicitely been configured as `False` then
            # mark the HomesteadTesterVM as not supporting the fork.
            HomesteadTesterVM = BaseHomesteadTesterVM.configure(support_dao_fork=False)
        elif dao_start_block is not None:
            # Otherwise, if a specific dao_start_block fork block has been set, use it.
            HomesteadTesterVM = BaseHomesteadTesterVM.configure(
                dao_fork_block_number=dao_start_block,
            )
        else:
            # Otherwise, default to the homestead_start_block block as the
            # start of the dao_start_block fork.
            HomesteadTesterVM = BaseHomesteadTesterVM.configure(
                dao_fork_block_number=homestead_start_block,
            )
        yield (homestead_start_block, HomesteadTesterVM)
        # If the homestead_start_block block is configured to start after block 0, set the
        # frontier rules to start at block 0.
        if homestead_start_block > 0:
            yield (0, FrontierTesterVM)
# Default tester chain: with no fork blocks declared,
# _generate_vm_configuration() starts the latest fork rules at genesis.
BaseMainnetTesterChain = Chain.configure(
    'MainnetTesterChain',
    vm_configuration=_generate_vm_configuration()
)
class MainnetTesterChain(BaseMainnetTesterChain):
    """Chain subclass for tests: skips proof-of-work seal validation and
    allows the fork schedule to be reconfigured after instantiation."""
    def validate_seal(self, block):
        """
        We don't validate the proof of work seal on the tester chain.
        """
        pass
    def configure_forks(self,
                        homestead_start_block=None,
                        dao_start_block=None,
                        eip150_start_block=None,
                        spurious_dragon_block=None):
        """
        Rebuild this chain's VM schedule from the given fork activation
        blocks (see ``_generate_vm_configuration`` for argument semantics).

        TODO: add support for state_cleanup
        """
        vm_configuration = _generate_vm_configuration(
            homestead_start_block=homestead_start_block,
            dao_start_block=dao_start_block,
            eip150_start_block=eip150_start_block,
            spurious_dragon_block=spurious_dragon_block,
        )
        # Mutates this instance's VM lookup table in place.
        self.vms_by_range = generate_vms_by_range(vm_configuration)
|
[
"pipermerriam@gmail.com"
] |
pipermerriam@gmail.com
|
95ab80f3fc1195c06b5ffcbf8b1f17dc0ef31ab7
|
291fe7fb4cc5b682e560b0c5958e2220054451c6
|
/Big41/ch4/조건문1.py
|
c57b278f55bd2e596e39e220b35dc5942d768772
|
[] |
no_license
|
MinksChung/BigdataCourse
|
44dc5e7e578515e1dafbb7870911e09347a788f4
|
293803415da5d9f354059ea556818cc7610f36a5
|
refs/heads/master
| 2022-12-22T06:14:59.880933
| 2020-01-26T14:58:09
| 2020-01-26T14:58:09
| 202,575,724
| 0
| 0
| null | 2022-12-15T23:28:43
| 2019-08-15T16:29:35
|
Python
|
UTF-8
|
Python
| false
| false
| 761
|
py
|
# print("파이썬 2번째 날입니다.")
# age = 100
#
# if age >= 20:
# print("성인입니다.")
# else:
# print("미성인입니다.")
# 비교연산자를 스트링 비교에 사용할 수 있다.
# 제어문에 중간괄호({ })대신 :(콜론)을 사용한다.
# :을 사용하면 반드시 다음 아래줄에 들여쓰기를 해야한다.
# 들여쓰기 후, 꼭 처리 내용이나 pass를 써주어야 한다.
# login_id = 'root'
# if login_id == 'root':
# print('로그인 ok')
# else:
# pass
jumsu = 88
# Grade boundaries, highest first; the first threshold that jumsu reaches
# determines the printed grade, otherwise it falls through to F.
_grade_table = ((90, 'A학점'), (80, 'B학점'), (70, 'C학점'), (60, 'D학점'))
for _cutoff, _label in _grade_table:
    if jumsu >= _cutoff:
        print(_label)
        break
else:
    print('F학점')
|
[
"minkschung@gmail.com"
] |
minkschung@gmail.com
|
0412325572d786d5ff622d57872828e3ac3f1281
|
d6c117812a618ff34055488337aaffea8cf81ca1
|
/scenes/Breakout_Clone.py
|
17e3ecd37edfce711d910432545e1bc7b564035f
|
[] |
no_license
|
c0ns0le/Pythonista
|
44829969f28783b040dd90b46d08c36cc7a1f590
|
4caba2d48508eafa2477370923e96132947d7b24
|
refs/heads/master
| 2023-01-21T19:44:28.968799
| 2016-04-01T22:34:04
| 2016-04-01T22:34:04
| 55,368,932
| 3
| 0
| null | 2023-01-22T01:26:07
| 2016-04-03T21:04:40
|
Python
|
UTF-8
|
Python
| false
| false
| 7,869
|
py
|
# -*- coding: utf-8 -*-
# https://gist.github.com/SebastianJarsve/5305895
# Created by Sebastian Jarsve
# 9. April 2013
from scene import *
from sound import play_effect
from random import randint
def centered_rect(x, y, w, h):
    """Return a Rect of size (w, h) whose center sits at (x, y)."""
    left = x - w/2
    bottom = y - h/2
    return Rect(left, bottom, w, h)
class Field(object):
    """The playing-field border: three line segments along the left, top
    and right screen edges (the bottom edge is not drawn)."""
    def __init__(self):
        size = screen_size  # NOTE(review): assigned but never used
        left = 0
        bottom = 0
        right = screen_size.w
        top = screen_size.h
        # (x1, y1, x2, y2) segments: left wall, ceiling, right wall.
        self.lines = [(left, bottom, left, top), (left, top, right, top), (right, top, right, bottom)]
    def draw(self):
        stroke_weight(4)
        stroke(1,1,1)
        for l in self.lines:
            line(*l)
class Player(object):
    """The paddle, steered horizontally via the scene module's gravity()
    reading (device tilt)."""
    def __init__(self):
        # 100x20 paddle, horizontally centered near the bottom; 3 lives.
        self.rect = centered_rect(screen_size.w/2, 50, 100, 20)
        self.lives = 3
    def update(self):
        # Scale the tilt into per-frame horizontal movement, then clamp to
        # the screen (100 here is the paddle width set in __init__).
        self.rect.x += gravity().x * 50
        self.rect.x = min(screen_size.w - 100, max(0, self.rect.x))
    def draw(self):
        fill(1,1,1)
        rect(*self.rect)
class Ball(object):
    """The bouncing ball.

    Moves by (vx, vy) each frame, reflects off the left/right/top screen
    edges, the paddle and blocks.  While ``is_moving`` is False the Game
    scene parks it on top of the paddle.
    """
    def __init__(self):
        # 20x20 ball just above the paddle; random initial horizontal speed.
        self.rect = centered_rect(screen_size.w/2, 60, 20, 20)
        self.vx = randint(-6, 6)
        self.vy = 7
        self.is_moving = False
    def collide_with_paddle(self, paddle):
        """Bounce off *paddle*; the rebound direction depends on where the
        ball hit relative to the paddle's center."""
        if self.rect.intersects(paddle.rect):
            self.rect.y = paddle.rect.top()
            self.vy *= -1
            pos = self.rect.center().x - paddle.rect.center().x
            self.vx = pos/10
            play_effect('Jump_3')
    def collide_with_block(self, block):
        """Bounce off *block*, reflecting according to which edge rect was
        hit.  Returns True on a hit (caller removes destructible blocks);
        returns None (falsy) otherwise."""
        if self.rect.intersects(block.rect):
            if self.rect.intersects(block.left):
                self.rect.x = block.rect.left()-self.rect.w
                self.vx = -abs(self.vx)
            elif self.rect.intersects(block.right):
                self.rect.x = block.rect.right()
                self.vx = abs(self.vx)
            elif self.rect.intersects(block.top):
                self.rect.y = block.rect.top()
                self.vy = abs(self.vy)
            elif self.rect.intersects(block.bottom):
                self.rect.y = block.rect.bottom()-self.rect.h
                self.vy = -abs(self.vy)
            return True
    def update(self, dt):
        """Advance one frame and bounce off the side/top screen edges."""
        # NOTE(review): the dt*10 term is added regardless of the velocity's
        # sign, slightly biasing movement up/right -- confirm intent.
        self.rect.x += self.vx + dt*10
        self.rect.y += self.vy + dt*10
        if self.rect.right() >= screen_size.w:
            self.rect.x = screen_size.w - self.rect.w
            self.vx *= -1
            play_effect('Jump_5')
        if self.rect.left() <= 0:
            self.rect.x = 0
            self.vx *= -1
            play_effect('Jump_5')
        if self.rect.top() >= screen_size.h:
            # BUGFIX: clamp using the ball's height, not its width (the two
            # happened to be equal for the 20x20 ball, masking the bug).
            self.rect.y = screen_size.h - self.rect.h
            self.vy *= -1
            play_effect('Jump_5')
    def draw(self):
        fill(1,1,0)
        no_stroke()
        ellipse(*self.rect)
class Block(object):
    """A brick: destructible when mode == 1, solid (never removed) when
    mode > 1.

    Besides the main rect, four thin edge rects (left/right/top/bottom)
    are precomputed so Ball.collide_with_block can tell which side it hit.
    """
    def __init__(self, x, y, w, mode=1):
        self.size = Size(w, 30)
        self.rect = Rect(x, y, *self.size)
        self.mode = mode
        if self.mode > 1:
            self.colour = (0.70, 0.70, 0.70)  # grey: solid block
        else:
            self.colour = (1,0,0)  # red: normal destructible block
        top = self.rect.top()
        left = self.rect.left()
        right = self.rect.right()
        bottom = self.rect.bottom()
        # 5px-wide strips along each edge (side strips are inset vertically
        # so corner hits resolve to left/right first).
        self.left = Rect(left-5, bottom+5, 5, top-bottom-10)
        self.right = Rect(right, bottom+5, 5, top-bottom-10)
        self.bottom = Rect(left, bottom, right-left, 5)
        self.top = Rect(left, top-5, right-left, 5)
    def draw_sides(self):
        # Debug helper: visualize the four edge-collision strips.
        fill(0,1,0)
        rect(*self.left)
        rect(*self.right)
        rect(*self.top)
        rect(*self.bottom)
    def draw(self):
        stroke_weight(1)
        #no_stroke()
        fill(*self.colour)
        rect(*self.rect)
        #self.draw_sides()
def random_level(n=7, t=13):
    """Return an *n*-row by *t*-column layout of random 0/1 block flags."""
    return [[randint(0, 1) for _col in range(t)] for _row in range(n)]
# Level layouts: one nested list per level; within a row, 1 = destructible
# block, 2 = solid (indestructible) block, 0 = gap.  Rows may have different
# lengths -- Game.spawn_blocks scales block width so each row spans the full
# screen width.  The final slot is a random layout, regenerated by Game.draw
# once the fixed levels are exhausted.
level = [
    [[1, 1, 1],
     [1, 1, 1],
     [1, 1, 1],
     [1, 1, 1],
     [1, 1, 1]],
    [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
     [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
     [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
     [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]],
    [[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
     [0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
     [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
     [0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
     [0, 0, 1, 1, 1, 1, 1, 1, 0, 0]],
    [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
     [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1],
     [1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1],
     [1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1],
     [1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1],
     [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
    [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
     [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
     [1, 2, 1],
     [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
    [[1, 1, 1, 1, 1, 1, 1, 1],
     [1, 1, 1, 1, 1, 1, 1, 1],
     [2, 2, 2, 0, 0, 2, 2, 2]],
    random_level()
]
class Game(Scene):
    """Main gameplay scene: paddle, ball, blocks, lives and level flow."""
    def setup(self):
        self.level = 1
        self.field = Field()
        self.player = Player()
        self.ball = Ball()
        self.blocks = []
        self.spawn_blocks()
    def spawn_blocks(self):
        """Populate self.blocks / self.solid_blocks from the current level
        layout; block width is scaled so each row spans the screen."""
        self.solid_blocks = []
        # Clamp to the last layout once the player has passed every level.
        if self.level > len(level):
            lvl = len(level)-1
        else:
            lvl = self.level-1
        for y in range(len(level[lvl])):
            for x in range(len(level[lvl][y])):
                w = screen_size.w/len(level[lvl][y])
                mode = level[lvl][y][x]
                if level[lvl][y][x] == 1:
                    self.blocks.append(Block(x * w, screen_size.h - (y*30+90),
                                             w, mode))
                elif level[lvl][y][x] == 2:
                    self.solid_blocks.append(Block(x * w, screen_size.h - (y*30+90),
                                                   w, mode))
    def draw(self):
        """Per-frame update + render (called by the scene module)."""
        removed_blocks = set()
        text('Lives: {0}'.format(self.player.lives), x=screen_size.w-45, y=screen_size.h-40)
        text('Level: {0}'.format(self.level), x=45, y=screen_size.h-45)
        self.field.draw()
        self.player.draw()
        self.player.update()
        self.ball.draw()
        if self.ball.is_moving:
            self.ball.update(self.dt)
            self.ball.collide_with_paddle(self.player)
        else:
            # Ball rides on top of the paddle until the player launches it.
            self.ball.rect.center(self.player.rect.center().x, self.player.rect.top()+10)
            self.ball.line = (0, 0, 0, 0)
        if self.ball.rect.top() < 0:
            # Ball fell off the bottom: lose a life and re-park the ball.
            self.player.lives -= 1
            self.ball.is_moving = False
        for block in self.blocks:
            block.draw()
            if self.ball.is_moving:
                if self.ball.collide_with_block(block):
                    removed_blocks.add(block)
                    play_effect('Hit_3')
        for solid_block in self.solid_blocks:
            solid_block.draw()
            if self.ball.is_moving:
                if self.ball.collide_with_block(solid_block):
                    play_effect('Ding_1')
        # Remove destroyed blocks only after iteration to avoid mutating
        # the list while looping over it.
        for removed_block in removed_blocks:
            self.blocks.remove(removed_block)
        if len(self.blocks) == 0:
            # Level cleared: advance; past the fixed layouts, the final slot
            # is re-randomized each time.
            self.ball.is_moving = False
            self.level += 1
            self.spawn_blocks()
            if self.level >= len(level):
                level[-1] = random_level()
                self.spawn_blocks()
        if self.player.lives == 0:
            main_scene.switch_scene(GameOver())
    def touch_began(self, touch):
        # First tap launches the ball.
        if not self.ball.is_moving:
            self.ball.is_moving = True
class GameOver(Scene):
    """End screen: shows 'Game Over' text and a Restart button."""
    def setup(self):
        self.field = Field()
        self.button = Button(Rect(screen_size.w/2-100, screen_size.h/2-50, 200, 100), 'Restart')
        self.button.action = self.restart
        self.add_layer(self.button)
    def restart(self):
        # Button callback: swap in a fresh Game scene.
        main_scene.switch_scene(Game())
    def draw(self):
        self.field.draw()
        self.button.draw()
        no_tint()
        text('Game Over', x=screen_size.w/2, y=screen_size.h/4*3, font_size=64)
class MultiScene(Scene):
    """Top-level scene that owns one active sub-scene and forwards every
    scene-module callback (setup/draw/touch events) to it."""
    def __init__(self, start_scene):
        self.active_scene = start_scene
        # run() hands control to the scene module's event loop.
        run(self, PORTRAIT)
    def switch_scene(self, new_scene):
        self.active_scene = new_scene
        self.setup()
    def setup(self):
        # Publish the screen size as a module-level global for the other
        # classes, then wire the active scene up with this scene's state.
        global screen_size
        screen_size = self.size
        self.active_scene.add_layer = self.add_layer
        self.active_scene.size = self.size
        self.active_scene.bounds = self.bounds
        self.active_scene.setup()
    def draw(self):
        background(0.00, 0.25, 0.50)
        self.active_scene.touches = self.touches
        self.active_scene.dt = self.dt
        self.active_scene.draw()
    def touch_began(self, touch):
        self.active_scene.touch_began(touch)
    def touch_moved(self, touch):
        self.active_scene.touch_moved(touch)
    def touch_ended(self, touch):
        self.active_scene.touch_ended(touch)
# Entry point: constructing MultiScene immediately starts the game loop.
main_scene = MultiScene(Game())
|
[
"itdamdouni@gmail.com"
] |
itdamdouni@gmail.com
|
acafcee721e06c4272142c36a3deca0a91574319
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flashblade/FB_2_2/models/object_store_access_policy_rule.py
|
c48033b9db845a618c81bd67851d063292b423c6
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,091
|
py
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_2 import models
class ObjectStoreAccessPolicyRule(object):
    """
    Swagger-generated model for one rule of an object-store access policy.

    Attributes:
      swagger_types (dict): The key is attribute name
            and the value is attribute type.
      attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'actions': 'list[str]',
        'conditions': 'PolicyRuleObjectAccessCondition',
        'effect': 'str',
        'policy': 'FixedReference',
        'resources': 'list[str]'
    }
    attribute_map = {
        'name': 'name',
        'actions': 'actions',
        'conditions': 'conditions',
        'effect': 'effect',
        'policy': 'policy',
        'resources': 'resources'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        name=None,  # type: str
        actions=None,  # type: List[str]
        conditions=None,  # type: models.PolicyRuleObjectAccessCondition
        effect=None,  # type: str
        policy=None,  # type: models.FixedReference
        resources=None,  # type: List[str]
    ):
        """
        Keyword args:
            name (str): Name of the object (e.g., a file system or snapshot).
            actions (list[str]): The list of actions granted by this rule. Each included action may restrict other properties of the rule. Supported actions are returned by the `/object-store-access-policy-actions` endpoint.
            conditions (PolicyRuleObjectAccessCondition): Conditions used to limit the scope which this rule applies to.
            effect (str): Effect of this rule. When `allow`, the rule allows the given actions to be performed on the given resources, subject to the given conditions. Valid values include `allow`.
            policy (FixedReference): The policy to which this rule belongs.
            resources (list[str]): The list of resources which this rule applies to. Each resource can include a bucket component, optionally followed by an object component. The choice of which components a resource can include is dictated by which actions are included in the rule. For further details, see the Object Store Access Policy Actions section of the User Guide.
        """
        # Only explicitly supplied attributes are set, so unset attributes
        # are distinguishable from None values.
        if name is not None:
            self.name = name
        if actions is not None:
            self.actions = actions
        if conditions is not None:
            self.conditions = conditions
        if effect is not None:
            self.effect = effect
        if policy is not None:
            self.policy = policy
        if resources is not None:
            self.resources = resources
    def __setattr__(self, key, value):
        # Reject any attribute outside the generated schema.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ObjectStoreAccessPolicyRule`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Property sentinel objects (class-level placeholders) read as None.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            return None
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ObjectStoreAccessPolicyRule, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ObjectStoreAccessPolicyRule):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"msholes@purestorage.com"
] |
msholes@purestorage.com
|
95ed3ebf9418039cb36fd809dcb13026d8ffb163
|
4ca6c75b8cfe7604827827aeee83b104fa7c93d9
|
/tests/apigateway/tests.py
|
a4c13a38821d83909c9775a29a08392a8dee52c7
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
ZextrasGiacomoMattiuzzi/gordon
|
3767800c64caf4444a13c15e890f84505b62296d
|
b6e4e8d5998c2b2c4c4edb3a2ec7124290f46e8b
|
refs/heads/master
| 2022-12-22T02:21:09.422648
| 2020-09-16T15:03:06
| 2020-09-16T15:03:06
| 285,250,476
| 0
| 1
|
NOASSERTION
| 2020-08-05T10:05:08
| 2020-08-05T10:05:08
| null |
UTF-8
|
Python
| false
| false
| 3,393
|
py
|
import os
import json
import boto3
import requests
from gordon.utils_tests import BaseIntegrationTest, BaseBuildTest
from gordon.utils import valid_cloudformation_name
from gordon import utils
class IntegrationTest(BaseIntegrationTest):
    """Live integration test: deploys the 0001_project apigateway example
    and verifies both the created lambdas and the deployed API endpoints.
    Requires real AWS credentials and AWS_DEFAULT_REGION."""
    def test_0001_project(self):
        self._test_project_step('0001_project')
        # Both the project ('p') and resources ('r') stacks must deploy.
        self.assert_stack_succeed('p')
        self.assert_stack_succeed('r')
        # hellopy lambda: settings must match the example project config.
        lambda_ = self.get_lambda(utils.valid_cloudformation_name('pyexample:hellopy'))
        self.assertEqual(lambda_['Runtime'], 'python2.7')
        self.assertEqual(lambda_['Description'], 'My hello description')
        self.assertEqual(lambda_['MemorySize'], 192)
        self.assertEqual(lambda_['Timeout'], 123)
        aliases = self.get_lambda_aliases(function_name=lambda_['FunctionName'])
        self.assertEqual(list(aliases.keys()), ['current'])
        response = self.invoke_lambda(
            function_name=lambda_['FunctionName'],
            payload={}
        )
        self.assert_lambda_response(response, 'hello')
        # byepy lambda: same checks for the second function.
        lambda_ = self.get_lambda(utils.valid_cloudformation_name('pyexample:byepy'))
        self.assertEqual(lambda_['Runtime'], 'python2.7')
        self.assertEqual(lambda_['Description'], 'My bye description')
        self.assertEqual(lambda_['MemorySize'], 192)
        self.assertEqual(lambda_['Timeout'], 123)
        aliases = self.get_lambda_aliases(function_name=lambda_['FunctionName'])
        self.assertEqual(list(aliases.keys()), ['current'])
        response = self.invoke_lambda(
            function_name=lambda_['FunctionName'],
            payload={}
        )
        self.assert_lambda_response(response, 'bye')
        # Locate the deployed API Gateway by name and hit its endpoints.
        client = boto3.client('apigateway')
        api = [a for a in client.get_rest_apis()['items'] if a['name'] == 'helloapi-{}'.format(self.uid)][0]
        endpoint = 'https://{}.execute-api.{}.amazonaws.com/{}'.format(api['id'], os.environ['AWS_DEFAULT_REGION'], self.uid)
        response = requests.get(endpoint)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), '"hello"')
        # NOTE(review): the 404 route is expected to still return the
        # "hello" body -- confirm this matches the example's 404 mapping.
        response = requests.get('{}/404'.format(endpoint))
        self.assertEqual(response.status_code, 404)
        self.assertEqual(response.content.decode('utf-8'), '"hello"')
        response = requests.get('{}/shop/2'.format(endpoint))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), '"hello"')
        # /http proxies to an echo service; check the query args round-trip.
        response = requests.get('{}/http'.format(endpoint))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content.decode('utf-8'))['args'], {'hello': 'world'})
        # /complex dispatches to a different lambda per HTTP method.
        response = requests.get('{}/complex'.format(endpoint))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), '"hello"')
        response = requests.post('{}/complex'.format(endpoint))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), '"bye"')
class BuildTest(BaseBuildTest):
    """Offline build test: checks the artifacts generated for the
    0001_project step against the expected JSON files (via assertBuild,
    provided by BaseBuildTest)."""
    def test_0001_project(self):
        # Show full diffs when a JSON comparison fails.
        self.maxDiff = None
        self._test_project_step('0001_project')
        self.assertBuild('0001_project', '0001_p.json')
        self.assertBuild('0001_project', '0002_pr_r.json')
        self.assertBuild('0001_project', '0003_r.json')
|
[
"me@jorgebastida.com"
] |
me@jorgebastida.com
|
2dfd45cbd4308213e05b748eeca0e94c887cb457
|
b50f43c7c8cba1c0f349870596f12d1a333e6f42
|
/axonius_api_client/api/json_api/lifecycle.py
|
1b1fa1e637e4ac6cc877cd9a193ee9d9678de93e
|
[
"MIT"
] |
permissive
|
zahediss/axonius_api_client
|
190ca466e5de52a98af9b527a5d1c132fd8a5020
|
8321788df279ffb7794f179a4bd8943fe1ac44c4
|
refs/heads/master
| 2023-08-01T14:35:17.095559
| 2021-09-13T21:04:23
| 2021-09-13T21:04:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
# -*- coding: utf-8 -*-
"""Models for API requests & responses."""
import dataclasses
from typing import List, Optional, Type
import marshmallow_jsonapi
from .base import BaseModel, BaseSchema, BaseSchemaJson
class LifecycleSchema(BaseSchemaJson):
    """JSON:API schema for lifecycle (discovery cycle) responses."""
    last_finished_time = marshmallow_jsonapi.fields.Str(allow_none=True)
    last_start_time = marshmallow_jsonapi.fields.Str(allow_none=True)
    # NOTE(review): declared as Number here, but the Lifecycle model types
    # next_run_time as Optional[str] -- confirm which one the API returns.
    next_run_time = marshmallow_jsonapi.fields.Number(allow_none=True)
    status = marshmallow_jsonapi.fields.Str()
    sub_phases = marshmallow_jsonapi.fields.List(marshmallow_jsonapi.fields.Dict())
    tunnel_status = marshmallow_jsonapi.fields.Str()
    class Meta:
        """JSON:API resource type for this schema."""
        type_ = "lifecycle_schema"
    @staticmethod
    def get_model_cls() -> type:
        """Return the dataclass this schema deserializes into."""
        return Lifecycle
@dataclasses.dataclass
class Lifecycle(BaseModel):
    """Model for lifecycle (discovery cycle) state returned by the API."""
    # Timestamps of the previous cycle; None when no cycle has run yet.
    last_finished_time: Optional[str] = None
    last_start_time: Optional[str] = None
    next_run_time: Optional[str] = None
    status: Optional[str] = None
    # Per-phase progress entries, as raw dicts from the API.
    sub_phases: List[dict] = dataclasses.field(default_factory=list)
    tunnel_status: Optional[str] = None
    @staticmethod
    def get_schema_cls() -> Optional[Type[BaseSchema]]:
        """Return the marshmallow schema used to (de)serialize this model."""
        return LifecycleSchema
|
[
"jimbosan@gmail.com"
] |
jimbosan@gmail.com
|
26d6ddad8285870df5057d6ccabf8e80206457e9
|
95495baeb47fd40b9a7ecb372b79d3847aa7a139
|
/test/test_network_address.py
|
d733d83fb937e590c374c7132fc314a75cc127e8
|
[] |
no_license
|
pt1988/fmc-api
|
b1d8ff110e12c13aa94d737f3fae9174578b019c
|
075f229585fcf9bd9486600200ff9efea5371912
|
refs/heads/main
| 2023-01-07T09:22:07.685524
| 2020-10-30T03:21:24
| 2020-10-30T03:21:24
| 308,226,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
# coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.network_address import NetworkAddress # noqa: E501
from swagger_client.rest import ApiException
class TestNetworkAddress(unittest.TestCase):
    """Generated stub tests for the NetworkAddress model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testNetworkAddress(self):
        """Test NetworkAddress"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.network_address.NetworkAddress()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"pt1988@gmail.com"
] |
pt1988@gmail.com
|
62d0e0bb9ae6823f07924b56f20e211d04c91598
|
71dc727f9056934cd51692f8a3d26cf0dda44ef0
|
/sample-programs/week6/credit_card.py
|
c01ecf9751b6232aa66da9be53bfaadfc1851845
|
[
"MIT"
] |
permissive
|
justinclark-dev/CSC110
|
9d255020a50bbfdb195465c3e742dd2fcd61e3a4
|
d738ec33b757ba8fa9cf35b2214c184d532367a0
|
refs/heads/master
| 2022-12-08T08:08:30.667241
| 2020-09-04T01:05:34
| 2020-09-04T01:05:34
| 232,606,910
| 0
| 1
|
MIT
| 2020-09-04T02:05:47
| 2020-01-08T16:28:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,153
|
py
|
# credit_card.py
#
# How long will it take to pay off a credit card balance??
# This program makes use of a loop to perform a SIMULATION
# of a real-world situation.
# CSC 110
# Fall 2011
### Get and validate inputs
# Dividing the annual percentage rate by 1200 converts it to a monthly
# fractional rate (/100 for percent, /12 for months).
interest_multiplier = float(input('Enter an ANNUAL INTEREST RATE ' \
                        + 'as a PERCENTAGE, >= zero: ')) / 1200.0
while interest_multiplier < 0:
    interest_multiplier = float(input('TRY AGAIN -- annual ' \
                        + 'rate must be >= zero: ')) / 1200.0
initial_balance = float(input('Enter an INITIAL ACCOUNT BALANCE ' \
                        + 'in dollars, >= 100: '))
while initial_balance < 100:
    initial_balance = float(input('TRY AGAIN -- initial balance ' \
                        + 'must be >= 100: '))
payment = float(input('Enter the MONTHLY PAYMENT to be made, ' \
                        + 'in dollars, >= 10: '))
while payment < 10:
    payment = float(input('TRY AGAIN -- monthly payment ' \
                        + 'must be >= 10: '))
### Simulate account changes until the account is paid in full
balance = initial_balance       # initialize accumulator
months = 0                      # initialize counter
total_payments = 0              # initialize accumulator;
# NOTICE that the loop continues as long as the balance is greater than
# zero, BUT not longer than 1200 months -- a condition necessary
# to prevent an infinite loop if the payment is too low.
while balance > 0 and months < 1200:
    # Accrue one month of interest, then apply the payment.
    balance = balance + (balance * interest_multiplier)
    balance -= payment
    total_payments += payment
    months += 1
    # print(balance)  # use to TRACE loop operation
years = months // 12    # integer division on purpose -- whole years only
months = months % 12
### Show results
print('\nAfter ' + str(years) + ' years and ' + str(months) + ' months')
if balance <= 0:
    print('your debt is paid.')
    total_payments += balance   # corrects for any excess payment (balance <= 0)
    print('\nTotal interest = $'
          + format((total_payments - initial_balance),',.2f') + '.')
else:
    print('your debt is still not paid off!')
    print('Remaining balance = $' + format(balance, ',.2f') + '.')
print('\nTotal payments = $' + format(total_payments, ',.2f') + '.\n')
|
[
"justinclark.dev@gmail.com"
] |
justinclark.dev@gmail.com
|
52ee52ca04897359b7e695a86c8052f993618c0d
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/client/messenger/proto/xmpp/jid.py
|
e203d8aed0708dffb0f2472e615a03256d991a33
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009
| 2017-02-03T21:40:17
| 2017-02-03T21:40:17
| 80,870,824
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 6,022
|
py
|
# 2017.02.03 21:53:47 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/messenger/proto/xmpp/jid.py
import random
import types
import time
from string import Template
from ConnectionManager import connectionManager
from ids_generators import SequenceIDGenerator
from messenger import g_settings
from messenger.proto.xmpp.xmpp_constants import XMPP_MUC_CHANNEL_TYPE
class BareJID(object):
    """An XMPP bare JID: ``node@domain`` with no resource part.

    Note: this module is Python 2 code (types.StringTypes, __nonzero__).
    """
    __slots__ = ('_node', '_domain')

    def __init__(self, jid = None):
        super(BareJID, self).__init__()
        self.setJID(jid)

    def setJID(self, jid):
        """Parse *jid* (a string or another [Bare]JID) into node/domain.

        Returns the resource tail (text after '/'), if any, so that the
        JID subclass can store it; BareJID itself discards it.
        Raises ValueError for unsupported argument types.
        """
        tail = ''
        if not jid:
            self._node, self._domain = ('', '')
        elif type(jid) in types.StringTypes:
            if jid.find('@') + 1:
                self._node, jid = jid.split('@', 1)
                self._node = self._node.lower()
            else:
                self._node = ''
            if jid.find('/') + 1:
                self._domain, tail = jid.split('/', 1)
            else:
                self._domain = jid
            self._domain = self._domain.lower()
        elif isinstance(jid, BareJID):
            self._node, self._domain, tail = jid.getNode(), jid.getDomain(), jid.getResource()
        else:
            raise ValueError('JID can be specified as string or as instance of JID class.')
        return tail

    def getBareJID(self):
        return self

    def getNode(self):
        return self._node

    def setNode(self, node):
        # BUGFIX: these were two independent `if` statements, so a None
        # argument fell through to the `else` branch and stored None
        # instead of ''.  `elif` restores the intended three-way split:
        # None -> '', strings -> lowercased, anything else (e.g. a numeric
        # database ID) -> stored as-is.
        if node is None:
            self._node = ''
        elif type(node) in types.StringTypes:
            self._node = node.lower()
        else:
            self._node = node
        return

    def getDomain(self):
        return self._domain

    def setDomain(self, domain):
        # BUGFIX: the decompiled artifact `raise domain or AssertionError(...)`
        # raised unconditionally (a TypeError for any non-empty string).
        # Restore the original assertion: the domain must be non-empty.
        assert domain, 'Domain no empty'
        self._domain = domain.lower()

    def getResource(self):
        # Bare JIDs never carry a resource.
        return ''

    def setResource(self, resource):
        pass

    def __str__(self):
        if self._node:
            jid = '{0}@{1}'.format(self._node, self._domain)
        else:
            jid = self._domain
        return jid

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        # Compare by string form, so JIDs compare equal to plain strings.
        return self.__str__() == str(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __nonzero__(self):
        return self.__str__() != ''

    def __hash__(self):
        return hash(self.__str__())

    def __getstate__(self):
        # Pickle as the plain string form.
        return str(self)

    def __setstate__(self, state):
        self.setJID(state)
class JID(BareJID):
    """A full XMPP JID: ``node@domain/resource``."""
    __slots__ = ('_resource',)

    def __init__(self, jid = None):
        super(JID, self).__init__(jid)

    def setJID(self, jid):
        # BareJID.setJID returns the resource tail; keep it here.
        self._resource = super(JID, self).setJID(jid)

    def getBareJID(self):
        # Drop the resource part.
        return BareJID(self)

    def getResource(self):
        return self._resource

    def setResource(self, resource):
        if resource:
            self._resource = resource
        else:
            self._resource = ''

    def __str__(self):
        base = super(JID, self).__str__()
        return '{0}/{1}'.format(base, self._resource) if self._resource else base
class _DatabaseIDGetter(object):
    """Mixin for JID classes whose node part encodes a database ID."""
    def getDatabaseID(self):
        # The node is expected to hold a decimal database ID; an empty or
        # non-numeric node maps to 0.
        value = getattr(self, '_node')
        if value:
            try:
                result = long(value)
            except ValueError:
                result = 0
        else:
            result = 0
        return result
class ContactBareJID(BareJID, _DatabaseIDGetter):
    """Bare JID of a contact; hashes by database ID, so JIDs of the same
    account collide in sets and dicts."""
    def __hash__(self):
        return self.getDatabaseID()
class ContactJID(JID, _DatabaseIDGetter):
    """Full JID (with resource) of a contact; also hashes by database ID."""
    def getBareJID(self):
        return ContactBareJID(self)
    def __hash__(self):
        return self.getDatabaseID()
def makeContactJID(dbID):
    """Build a contact's bare JID of the form ``<dbID>@<xmpp domain>``."""
    jid = ContactBareJID()
    jid.setNode(long(dbID))
    jid.setDomain(g_settings.server.XMPP.domain)
    return jid
# Sequence counter used to de-duplicate generated user-room names.
_counter = SequenceIDGenerator()
def makeUserRoomJID(room = ''):
    """Build a JID for a user-created MUC room.

    When *room* is empty, a unique name is generated from the current time,
    a random number and a sequence counter.  Returns an empty JID when the
    user-rooms MUC service is not configured.
    """
    jid = JID()
    service = g_settings.server.XMPP.getChannelByType(XMPP_MUC_CHANNEL_TYPE.USERS)
    if not service or not service['hostname']:
        return jid
    if not room:
        room = 'user_room_{:08X}_{:08X}_{:04X}'.format(long(time.time()) & 4294967295L, random.randrange(1, 4294967295L), _counter.next())
    jid.setNode(room)
    jid.setDomain(service['hostname'])
    return jid
def makeSystemRoomJID(room = '', channelType = XMPP_MUC_CHANNEL_TYPE.STANDARD):
    """
    Build a JID for a system MUC room.
    :param room: explicit room name; when empty, the name is derived from
        the service's configured format template
    :param channelType: channel type (XMPP_MUC_CHANNEL_TYPE)
    :return: system room JID (an empty JID when the service is not
        configured or no room name could be produced)
    """
    jid = JID()
    service = g_settings.server.XMPP.getChannelByType(channelType)
    if not service or not service['hostname']:
        return jid
    room = room or _getSystemChannelNameFormatter(service)
    if not room:
        return jid
    jid.setNode(room)
    jid.setDomain(service['hostname'])
    return jid
def _getSystemChannelNameFormatter(service):
    """Build a system channel (MUC room) name by substituting the current
    periphery ID and the service's settings into its ``format`` template.

    NOTE: the original ``if chanTemplate: ... else: return None`` branch was
    dead code -- ``string.Template`` instances are always truthy -- so the
    unreachable ``return None`` path has been removed.  Behavior for all
    callers is unchanged.
    """
    peripheryID = connectionManager.peripheryID
    chanTemplate = Template(service['format'])
    return chanTemplate.safe_substitute(peripheryID=peripheryID, userString=service['userString'], hostname=service['hostname'], type=service['type'])
def makeClanRoomJID(clandDbId, channelType = XMPP_MUC_CHANNEL_TYPE.CLANS):
    """
    Build a JID for a clan MUC room.
    :param clandDbId: clan database ID, substituted as ``clanDBID`` into
        the service's room-name template
    :param channelType: channel type (XMPP_MUC_CHANNEL_TYPE)
    :return: clan room JID (an empty JID when the service is not configured
        or the template produced an empty name)
    """
    jid = JID()
    service = g_settings.server.XMPP.getChannelByType(channelType)
    if not service or not service['hostname']:
        return jid
    clanTemplate = Template(service['format'])
    room = clanTemplate.safe_substitute(clanDBID=clandDbId)
    if not room:
        return jid
    jid.setNode(room)
    jid.setDomain(service['hostname'])
    return jid
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\messenger\proto\xmpp\jid.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:53:47 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
d7a833d3cfc9b1389683b76c5f0ef9a016938987
|
78f3fe4a148c86ce9b80411a3433a49ccfdc02dd
|
/2017/09/wisc-gerrymandering-20170929/graphic_config.py
|
cb18764b6b447f7d4bc8090dc4075e9d99f838a3
|
[] |
no_license
|
nprapps/graphics-archive
|
54cfc4d4d670aca4d71839d70f23a8bf645c692f
|
fe92cd061730496cb95c9df8fa624505c3b291f8
|
refs/heads/master
| 2023-03-04T11:35:36.413216
| 2023-02-26T23:26:48
| 2023-02-26T23:26:48
| 22,472,848
| 16
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
#!/usr/bin/env python
import base_filters
# Key of the Google Doc holding this graphic's COPY spreadsheet.
COPY_GOOGLE_DOC_KEY = '1F_HDqo1D2EwXiUx95wG4QPRAvhm3P76OP6aXyv8NYgY'
# This graphic ships its own assets directory.
USE_ASSETS = True
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
[
"ahurt@npr.org"
] |
ahurt@npr.org
|
76dbaf4b89ba0644af07d6d0fb1df5416f4fe855
|
16631cf7cd4a70f2cd2750851649d3eff5e17724
|
/2015/day19/part1.py
|
20687aa134135898c045654106d8f4e7b87ac77d
|
[] |
no_license
|
kynax/AdventOfCode
|
1dd609a3308d733f2dd7d4ea00508d2da73180b9
|
36a339241dd7a31ebe08a73e5efa599e5faeea1a
|
refs/heads/master
| 2022-12-21T13:32:52.591068
| 2022-12-16T22:41:30
| 2022-12-16T22:41:30
| 48,439,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 891
|
py
|
import sys
mol = 'CRnSiRnCaPTiMgYCaPTiRnFArSiThFArCaSiThSiThPBCaCaSiRnSiRnTiTiMgArPBCaPMgYPTiRnFArFArCaSiRnBPMgArPRnCaPTiRnFArCaSiThCaCaFArPBCaCaPTiTiRnFArCaSiRnSiAlYSiThRnFArArCaSiRnBFArCaCaSiRnSiThCaCaCaFYCaPTiBCaSiThCaSiThPMgArSiRnCaPBFYCaCaFArCaCaCaCaSiThCaSiRnPRnFArPBSiThPRnFArSiRnMgArCaFYFArCaSiRnSiAlArTiTiTiTiTiTiTiRnPMgArPTiTiTiBSiRnSiAlArTiTiRnPMgArCaFYBPBPTiRnSiRnMgArSiThCaFArCaSiThFArPRnFArCaSiRnTiBSiThSiRnSiAlYCaFArPRnFArSiThCaFArCaCaSiThCaCaCaSiRnPRnCaFArFYPMgArCaPBCaPBSiRnFYPBCaFArCaSiAl'
#mol = 'ZZSiZZZZSiZSiZZSi'
#mol = 'HOHOHO'
res = []
repl = []
for line in sys.stdin:
words = line.split()
repl.append((words[0],words[2]))
c = 0
for (key,val) in repl:
cur = mol
pre = ''
while(key in cur):
i = cur.index(key)
out = pre + cur.replace(key,val,1)
pre += cur[:i+1]
cur = cur[i+1:]
res.append(out)
# if i == len(cur)-1:
# break
print(len(set(res)))
|
[
"guilemay@gmail.com"
] |
guilemay@gmail.com
|
1c35de4f84d46d2c3defc2f8ec85426949e56c02
|
97e60d0ca572d0dc3fc80f8719cd57a707ab6069
|
/bias_account_report_v6/__openerp__.py
|
6349817b455c8fc304e48597e462c7badb0b10a8
|
[] |
no_license
|
josepato/bias_trunk_v6
|
0c7c86493c88f015c049a139360478cabec7f698
|
b6ab6fc2ff3dc832f26effdba421bcc76d5cabac
|
refs/heads/master
| 2020-06-12T14:18:31.101513
| 2016-12-15T22:55:54
| 2016-12-15T22:55:54
| 75,803,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bias Account Custom Report',
'version': '1.0',
'category': 'Generic Modules/Account',
'description': """ Add custom report capabilities """,
'author': 'BIAS',
'depends': ['account', 'bias_fiscal_statements_v6'],
'update_xml': [
'security/ir.model.access.csv',
'custom_report_view.xml',
'custom_report_wizard.xml',
'financial_reports_wizard.xml',
'financial_reports_report.xml',
'custom_report_report.xml',
],
'installable': True,
'active': False,
# 'certificate': '0048234520147',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"josepato@hotmail.com"
] |
josepato@hotmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.