blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
edebe47b4cb17e60d34d08b33c6aff333614da02
|
105f8bb5f417248b2c56fec113746472cea94f5d
|
/slides/08_expressivity_optimization_generalization/images/make_relu_composition_figures.py
|
5f44f0cb8d4a7b2711e798edd969717a4a5c6ed6
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
m2dsupsdlclass/lectures-labs
|
659ce7d8f7a9eb94e776f16a10d0d1df3f037365
|
a41bdfde52081eaa615d86c46fceeae1c4b1d0cd
|
refs/heads/master
| 2023-06-01T20:30:17.669627
| 2022-12-07T11:20:05
| 2022-12-07T11:20:05
| 82,718,394
| 1,482
| 674
|
MIT
| 2022-03-10T21:34:29
| 2017-02-21T19:27:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 698
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import os
# Generate the figures in the same folder as this script so the slides can
# reference them by relative path (requires running as a file: uses __file__).
os.chdir(os.path.dirname(__file__))
def relu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    clipped = np.maximum(x, 0)
    return clipped
def tri(x):
    """Triangle bump on [0, 1] assembled from three ReLUs.

    Rises with slope 2 on [0, 0.5], falls back to 0 on [0.5, 1], and is 0
    outside [0, 1].
    """
    rising = relu(2 * x)
    falling = relu(4 * x - 2)
    return relu(rising - falling)
x = np.linspace(-.3, 1.3, 1000)

# Plot tri composed with itself 1..4 times; each extra composition doubles
# the number of triangle oscillations on [0, 1]. Figures and file names are
# identical to the original four copy-pasted plotting stanzas.
y = x
stem = ''
for _ in range(4):
    y = tri(y)
    stem += 'triangle_'
    plt.figure()
    plt.ylim(-0.1, 1.1)
    plt.plot(x, y)
    plt.savefig(stem + 'x.svg')
|
[
"olivier.grisel@ensta.org"
] |
olivier.grisel@ensta.org
|
db1ef123f5567a57b504517e803bcc932fdfd182
|
a5b5c48cec36047b3db3dc29a1f1a311ac4b54fa
|
/tests/test_names.py
|
fb8e92f41ff9029b67eaf2cf58b2ffedd4311449
|
[
"MIT"
] |
permissive
|
Josef-Friedrich/dyndns
|
feea95e94bdc08aa6b45e0163d012fceffe910c2
|
a24e45d22ebff45eac04230992402d63ea289c8b
|
refs/heads/main
| 2023-08-05T12:43:59.254785
| 2023-07-26T06:53:45
| 2023-07-26T06:53:45
| 140,106,325
| 8
| 3
|
MIT
| 2023-07-26T06:53:46
| 2018-07-07T17:58:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,681
|
py
|
import unittest
from _helper import zones
from dyndns.exceptions import NamesError
from dyndns.names import Names, Zone, Zones, validate_hostname, validate_tsig_key
class TestFunctionValidateHostname(unittest.TestCase):
    """Tests for dyndns.names.validate_hostname."""

    def assertRaisesMsg(self, hostname, msg):
        # Helper: assert that validating `hostname` raises NamesError with
        # exactly the message `msg`.
        with self.assertRaises(NamesError) as cm:
            validate_hostname(hostname)
        self.assertEqual(str(cm.exception), msg)

    def test_valid(self):
        # A valid hostname is returned canonicalized with a trailing dot.
        self.assertEqual(
            validate_hostname("www.example.com"),
            "www.example.com.",
        )

    def test_invalid_tld(self):
        self.assertRaisesMsg(
            "www.example.777",
            'The TLD "777" of the hostname "www.example.777" must be not '
            "all-numeric.",
        )

    def test_invalid_to_long(self):
        # 300 characters exceeds the 253-character hostname limit.
        self.assertRaisesMsg(
            "a" * 300,
            'The hostname "aaaaaaaaaa..." is longer than 253 characters.',
        )

    def test_invalid_characters(self):
        # Non-ASCII label characters are rejected.
        self.assertRaisesMsg(
            "www.exämple.com",
            'The label "exämple" of the hostname "www.exämple.com" is ' "invalid.",
        )
class TestFunctionValidateTsigKey(unittest.TestCase):
    """Tests for dyndns.names.validate_tsig_key."""

    def assertRaisesMsg(self, tsig_key, msg):
        # Helper: assert that validating `tsig_key` raises NamesError with
        # exactly the message `msg`.
        with self.assertRaises(NamesError) as cm:
            validate_tsig_key(tsig_key)
        self.assertEqual(str(cm.exception), msg)

    def test_valid(self):
        # A valid base64-looking TSIG key is returned unchanged.
        self.assertEqual(validate_tsig_key("tPyvZA=="), "tPyvZA==")

    def test_invalid_empty(self):
        self.assertRaisesMsg("", 'Invalid tsig key: "".')

    def test_invalid_string(self):
        self.assertRaisesMsg("xxx", 'Invalid tsig key: "xxx".')
class TestClassZone(unittest.TestCase):
    """Tests for dyndns.names.Zone."""

    def test_init(self):
        # The zone name is canonicalized with a trailing dot on construction.
        zone = Zone("example.com", "tPyvZA==")
        self.assertEqual(zone.zone_name, "example.com.")
        self.assertEqual(zone.tsig_key, "tPyvZA==")

    def test_method_split_fqdn(self):
        # split_fqdn returns (record_name, zone_name), both dot-terminated.
        zone = Zone("example.com", "tPyvZA==")
        record_name, zone_name = zone.split_fqdn("www.example.com")
        self.assertEqual(record_name, "www.")
        self.assertEqual(zone_name, "example.com.")

    def test_method_build_fqdn(self):
        zone = Zone("example.com", "tPyvZA==")
        fqdn = zone.build_fqdn("www")
        self.assertEqual(fqdn, "www.example.com.")
class TestClassZones(unittest.TestCase):
    """Tests for dyndns.names.Zones, using the shared `zones` fixture."""

    def test_init(self):
        # The internal mapping is keyed by the dot-terminated zone name.
        zone = zones.zones["example.org."]
        self.assertEqual(zone.zone_name, "example.org.")
        self.assertEqual(zone.tsig_key, "tPyvZA==")

    def test_method_get_zone_by_name(self):
        # Lookup works without the trailing dot.
        zone = zones.get_zone_by_name("example.org")
        self.assertEqual(zone.zone_name, "example.org.")
        self.assertEqual(zone.tsig_key, "tPyvZA==")

    def test_method_get_zone_by_name_raises(self):
        # NOTE: "Unkown" reproduces the library's exact (misspelled) message;
        # "fixing" the spelling here would break the assertion.
        with self.assertRaises(NamesError) as cm:
            zones.get_zone_by_name("lol.org")
        self.assertEqual(str(cm.exception), 'Unkown zone "lol.org.".')
class TestClassZonesMethodSplitNames(unittest.TestCase):
    """Tests for Zones.split_fqdn."""

    def test_with_dot(self):
        result = zones.split_fqdn("www.example.com")
        self.assertEqual(result, ("www.", "example.com."))

    def test_with_org(self):
        result = zones.split_fqdn("www.example.org")
        self.assertEqual(result, ("www.", "example.org."))

    def test_unkown_zone(self):
        # An FQDN matching no configured zone yields False (not None).
        result = zones.split_fqdn("www.xx.org")
        self.assertEqual(result, False)

    def test_subzones(self):
        # Local Zones instance (deliberately shadows the module-level `zones`
        # fixture): the most specific zone wins for a nested sub-zone.
        zones = Zones(
            [
                {"name": "example.com.", "tsig_key": "tPyvZA=="},
                {"name": "dyndns.example.com", "tsig_key": "tPyvZA=="},
            ]
        )
        result = zones.split_fqdn("lol.dyndns.example.com")
        self.assertEqual(result, ("lol.", "dyndns.example.com."))
class TestClassNames(unittest.TestCase):
    """Tests for the attributes derived by dyndns.names.Names."""

    def setUp(self):
        # One shared Names instance built from the fixture zones.
        self.names = Names(zones=zones, fqdn="www.example.com")

    def test_attribute_fqdn(self):
        self.assertEqual(self.names.fqdn, "www.example.com.")

    def test_attribute_zone_name(self):
        self.assertEqual(self.names.zone_name, "example.com.")

    def test_attribute_record_name(self):
        self.assertEqual(self.names.record_name, "www.")

    def test_attribute_tsig_key(self):
        self.assertEqual(self.names.tsig_key, "tPyvZA==")
# class TestClassNamesRaises(unittest.TestCase):
#
# def assertRaisesMsg(self, kwargs, msg):
# with self.assertRaises(JfErr) as cm:
# Names(zones, **kwargs)
# self.assertEqual(str(cm.exception), msg)
#
# def test_no_kwargs(self):
# self.assertRaisesMsg({'record_name', 'lol'}, '')
if __name__ == "__main__":
unittest.main()
|
[
"josef@friedrich.rocks"
] |
josef@friedrich.rocks
|
b72eb40ddf04009f6bc497d9343288127410c544
|
75cf6a9fd035883b64ca2309382e0178cf370b43
|
/Empirical/python/sklearn/linear_model/plot_iris_logistic.py
|
924f1346cf00691abcd213789bd478f944b4bf65
|
[] |
no_license
|
ygtfrdes/Program
|
171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d
|
1c1e30230f0df50733b160ca73510c41d777edb9
|
refs/heads/master
| 2022-10-08T13:13:17.861152
| 2019-11-06T04:53:27
| 2019-11-06T04:53:27
| 219,560,170
| 1
| 2
| null | 2022-09-30T19:51:17
| 2019-11-04T17:39:52
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Show below is a logistic-regression classifiers decision boundaries on the
first two dimensions (sepal length and width) of the `iris
<https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints
are colored according to their labels.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

logreg = LogisticRegression(C=1e5, solver='lbfgs', multi_class='multinomial')

# Create an instance of Logistic Regression Classifier and fit the data.
logreg.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = .02  # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
[
"githubfortyuds@gmail.com"
] |
githubfortyuds@gmail.com
|
a8636ce25a63341b2bda20acac4e2f0e1b086b68
|
e70a17e8a37847a961f19b136f3bbe74393fa2af
|
/RPI/build/image_view/catkin_generated/generate_cached_setup.py
|
2fe0a0bf295843f90693461632443faef230492a
|
[
"MIT"
] |
permissive
|
Mondiegus/ROS-4x4-CAR-AI
|
1413ead6f46a8b16005abeea3e0b215caa45f27e
|
124efe39168ce96eec13d57e644f4ddb6dfe2364
|
refs/heads/Master
| 2023-07-14T23:56:53.519082
| 2021-03-27T17:28:45
| 2021-03-27T17:28:45
| 334,233,839
| 0
| 0
|
MIT
| 2021-02-02T13:00:30
| 2021-01-29T18:46:16
|
Makefile
|
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
# -*- coding: utf-8 -*-
# NOTE: catkin-generated build script; edits here are overwritten on rebuild.
from __future__ import print_function

import os
import stat
import sys

# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/noetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/noetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/pi/catkin_ws/devel;/opt/ros/noetic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python3/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Regenerate the cached environment lines for the image_view devel space.
code = generate_environment_script('/home/pi/catkin_ws/devel/.private/image_view/env.sh')

output_filename = '/home/pi/catkin_ws/build/image_view/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Mark the generated shell script executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"Mondiegus9@gmail.com"
] |
Mondiegus9@gmail.com
|
3fb16f147771a0639471c589fa84a073965aed3d
|
fe3265b72e691c6df8ecd936c25b6d48ac33b59a
|
/homeassistant/components/devolo_home_network/entity.py
|
a26d8dce8f6fb23968697733741b152ad3eec79e
|
[
"Apache-2.0"
] |
permissive
|
bdraco/home-assistant
|
dcaf76c0967783a08eec30ce704e5e9603a2f0ca
|
bfa315be51371a1b63e04342a0b275a57ae148bd
|
refs/heads/dev
| 2023-08-16T10:39:15.479821
| 2023-02-21T22:38:50
| 2023-02-21T22:38:50
| 218,684,806
| 13
| 7
|
Apache-2.0
| 2023-02-21T23:40:57
| 2019-10-31T04:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
"""Generic platform."""
from __future__ import annotations
from typing import TypeVar
from devolo_plc_api.device import Device
from devolo_plc_api.device_api import (
ConnectedStationInfo,
NeighborAPInfo,
WifiGuestAccessGet,
)
from devolo_plc_api.plcnet_api import LogicalNetwork
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN
# Type variable for the coordinator payloads this integration handles: the
# PLC network topology, wifi client/neighbor lists, guest-wifi settings, or
# a plain bool.
_DataT = TypeVar(
    "_DataT",
    bound=(
        LogicalNetwork
        | list[ConnectedStationInfo]
        | list[NeighborAPInfo]
        | WifiGuestAccessGet
        | bool
    ),
)
class DevoloEntity(CoordinatorEntity[DataUpdateCoordinator[_DataT]]):
    """Representation of a devolo home network device."""

    _attr_has_entity_name = True

    def __init__(
        self,
        entry: ConfigEntry,
        coordinator: DataUpdateCoordinator[_DataT],
        device: Device,
    ) -> None:
        """Initialize a devolo home network device."""
        super().__init__(coordinator)
        self.device = device
        self.entry = entry
        # Device-registry metadata; the adapter's web UI is reached over
        # plain http at its IP address.
        self._attr_device_info = DeviceInfo(
            configuration_url=f"http://{device.ip}",
            identifiers={(DOMAIN, str(device.serial_number))},
            manufacturer="devolo",
            model=device.product,
            name=entry.title,
            sw_version=device.firmware_version,
        )
        # NOTE(review): entity_description is not set in this class — it is
        # presumably assigned by the concrete platform subclass before this
        # __init__ runs; confirm against the platform implementations.
        self._attr_unique_id = f"{device.serial_number}_{self.entity_description.key}"
|
[
"noreply@github.com"
] |
bdraco.noreply@github.com
|
bd105c34478ed631078af845e9908abee311a4d0
|
c2281d55883a51b2698119e3aeb843df9c8c885b
|
/Thesis ch 2/ClusteringBuckets/GenericModels/LogisticRegression/LogisticRegressionClassifier.py
|
a525596ace3707e35cafd39f7b5bbb875dd205da
|
[] |
no_license
|
akshitasawhney3008/Thesis-Final
|
1c004ffc6c2dd6ec711b212f9a35e46ea067c9c7
|
10865bab16bcc2ca4a5d4af345ffb4f2f7222104
|
refs/heads/master
| 2023-02-01T20:56:43.763024
| 2020-12-10T09:28:45
| 2020-12-10T09:28:45
| 320,037,411
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,871
|
py
|
import pickle
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, roc_auc_score, accuracy_score, matthews_corrcoef
import pandas as pd
# Configuration section
iter = 5  # number of train/test bucket splits; NOTE(review): shadows built-in iter()
cvCount = 8
seed = 42
# Candidate decision thresholds to sweep (500 points in [0.40, 0.60]).
thresholdRange = np.linspace(start=0.40, stop=0.60, num=500)
# Load list of best parameters from Random Search
with open('ListOfBestParamsRS.pkl', 'rb') as f:
    best_params = pickle.load(f)
def getPredictionsGivenThreshold(myMatrix, th):
    """Binarize class-1 probabilities at a decision threshold.

    Parameters
    ----------
    myMatrix : array-like of shape (n_samples, 2)
        predict_proba output; column 1 holds the probability of class 1.
    th : float
        Decision threshold; a sample is labeled 1 when p(class 1) >= th.

    Returns
    -------
    numpy.ndarray of shape (n_samples,) with integer labels in {0, 1}.
    """
    # Vectorized replacement for the original per-row Python loop: a single
    # comparison over column 1, cast to int labels.
    return (np.asarray(myMatrix)[:, 1] >= th).astype(int)
path = "C://Users//Arushi//PycharmProjects//ThesisChap2//fixedBuckets(10)//"
thresholdList = []
precisionList = []
recallList = []
aucList = []
accuracyList = []
mcList = []
# Sweep every candidate threshold; for each, average the metrics over the
# `iter` pre-built train/test bucket splits.
for threshold in thresholdRange:
    print(threshold)
    overallPrecision = 0
    overallRecall = 0
    overallAuauc = 0
    overallAccuracy = 0
    overallMc = 0
    for i in range(iter):
        # Load the binary feature matrices and labels for split i.
        X_train = np.load(path + 'final_train_binarydata_' + str(i) + '.npy').astype(float)
        Y_train = np.load(path + 'final_train_labels_' + str(i) + '.npy').astype(float).astype(int)
        X_test = np.load(path + 'final_test_binarydata_' + str(i) + '.npy').astype(float)
        Y_test = np.load(path + 'final_test_labels_' + str(i) + '.npy').astype(float).astype(int)
        bp = best_params[i]
        # Refit logistic regression with this split's best random-search
        # hyperparameters, then score the held-out bucket.
        clf = LogisticRegression(penalty=bp['penalty'], C=bp['C'],
                                 solver=bp['solver'], max_iter=bp['max_iter'],
                                 random_state=seed).fit(X_train, Y_train.ravel())
        predictionsProb = clf.predict_proba(X_test)
        # NOTE(review): rewritten every threshold iteration — only the last
        # threshold's probabilities survive in pp_lr<i>.csv.
        np.savetxt('pp_lr' + str(i) + '.csv', predictionsProb, delimiter=',')
        predictions = getPredictionsGivenThreshold(predictionsProb, threshold)
        precision = precision_score(Y_test, predictions)
        recall = recall_score(Y_test, predictions)
        # AUROC uses the raw probabilities, not the thresholded labels.
        auroc = roc_auc_score(Y_test, predictionsProb[:, 1])
        accuracy = accuracy_score(Y_test, predictions)
        matthewsCoeff = matthews_corrcoef(Y_test, predictions)
        overallPrecision += precision
        overallRecall += recall
        overallAuauc += auroc
        overallAccuracy +=accuracy
        overallMc += matthewsCoeff
    thresholdList.append(threshold)
    precisionList.append(overallPrecision / iter)
    recallList.append(overallRecall / iter)
    aucList.append(overallAuauc / iter)
    accuracyList.append(overallAccuracy / iter)
    mcList.append(overallMc / iter)
# Persist one row per threshold with the split-averaged metrics.
df = pd.DataFrame()
df['Threshold'] = thresholdList
df['Precision'] = precisionList
df['Recall'] = recallList
df['AUROC'] = aucList
df['Accuracy'] = accuracyList
df['MC'] = mcList
df.to_csv('Thresholding.csv', index=False)
print('Done')
|
[
"akshita17143@iiitd.ac.in"
] |
akshita17143@iiitd.ac.in
|
622a51454f770302f59c66c674d1b292207dda51
|
7aec3f10b07403b542e1c14a30a6e00bb479c3fe
|
/Codewars/7 kyu/Categorize New Member.py
|
55f0cbec80222ea4e7d8b24e7e53967b8d50837a
|
[] |
no_license
|
VictorMinsky/Algorithmic-Tasks
|
a5871749b377767176ba82308a6a0962e1b3e400
|
03a35b0541fe413eca68f7b5521eaa35d0e611eb
|
refs/heads/master
| 2020-08-02T23:18:06.876712
| 2020-01-16T19:08:49
| 2020-01-16T19:08:49
| 211,541,179
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
"""
The Western Suburbs Croquet Club has two categories of membership, Senior and Open. They would like your help with an application form that will tell prospective members which category they will be placed.
To be a senior, a member must be at least 55 years old and have a handicap greater than 7. In this croquet club, handicaps range from -2 to +26; the better the player the lower the handicap.
Input
Input will consist of a list of lists containing two items each. Each list contains information for a single potential member. Information consists of an integer for the person's age and an integer for the person's handicap.
Note for F#: The input will be of (int list list) which is a List<List>
Example Input
[[18, 20],[45, 2],[61, 12],[37, 6],[21, 21],[78, 9]]
Output
Output will consist of a list of string values (in Haskell: Open or Senior) stating whether the respective member is to be placed in the senior or open category.
Example Output
["Open", "Open", "Senior", "Open", "Open", "Senior"]
"""
def openOrSenior(data):
    """Classify each [age, handicap] pair as 'Senior' or 'Open'.

    A member is 'Senior' when aged at least 55 with a handicap above 7;
    everyone else is 'Open'. Order of the input is preserved.
    """
    return ['Senior' if member[0] >= 55 and member[1] > 7 else 'Open'
            for member in data]
|
[
"panasyuk.vityu@gmail.com"
] |
panasyuk.vityu@gmail.com
|
4bd113348d198afab8b9cbb961f32431345be6c9
|
90ab150948d5bf8431548a99ce6230b59f2cdf3a
|
/invoice/migrations/0002_auto_20200619_1055.py
|
5909905d230aaec1c5defb77743e0d61a19b4988
|
[] |
no_license
|
fadhlimulyana20/halo_bisnis_api
|
a180768086211d36ea3253fa7db3968818ac97b5
|
20dd00973c1def65b3290f1a640fa0218381e2f8
|
refs/heads/master
| 2022-11-22T03:57:27.691220
| 2020-07-11T10:24:16
| 2020-07-11T10:24:16
| 278,766,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
# Generated by Django 3.0.7 on 2020-06-19 03:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create InvoiceProduct and add an FK to it on invoiceporductitem."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('invoice', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='InvoiceProduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                # Owning user; rows are removed when the user is deleted.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # NOTE(review): 'invoiceporductitem' looks misspelled, but it must
        # match the model name as created in 0001_initial — do not "fix" it
        # in a historical migration.
        migrations.AddField(
            model_name='invoiceporductitem',
            name='invoice',
            # null=True lets pre-existing rows survive the new column.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='invoice.InvoiceProduct'),
        ),
    ]
|
[
"baba.fadhli@gmail.com"
] |
baba.fadhli@gmail.com
|
c4a373608ae84b88523bef5559abaef2eae5e1af
|
336431aae640a29c22b723d4889e3a90bd62abb1
|
/tests/demoproject/demo/sample/admin.py
|
1df03211880776a0367394d714a88a32c7db733f
|
[
"Apache-2.0"
] |
permissive
|
csabadenes/unicef-snapshot
|
ef1a864e9f558d0fb18af5ea8d2a641192946592
|
2dcf6a8e15ff75566c168297f0f4194627bcb083
|
refs/heads/master
| 2020-04-25T18:02:30.771620
| 2018-08-08T16:36:09
| 2018-08-08T16:36:09
| 172,970,780
| 0
| 0
| null | 2019-02-27T18:48:55
| 2019-02-27T18:48:54
| null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from demo.sample.models import Author
from django.contrib import admin
from unicef_snapshot.admin import ActivityInline, SnapshotModelAdmin
class AuthorAdmin(SnapshotModelAdmin):
    """Django admin for Author with snapshot activity shown inline."""

    list_display = ('name', )
    inlines = (ActivityInline, )


admin.site.register(Author, AuthorAdmin)
|
[
"greg@reinbach.com"
] |
greg@reinbach.com
|
cee68a32de153c1ce0e9544ae443df9f22c119b9
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_1/launch-template-version_delete.py
|
3d243ad877badd7d21340ef9b6563f7511c32dd3
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys

# Make the repository root importable so `common` resolves when this file is
# executed directly from its subdirectory.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-launch-template-versions.html
if __name__ == '__main__':
    """
    create-launch-template-version : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-launch-template-version.html
    describe-launch-template-versions : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-launch-template-versions.html
    """
    parameter_display_string = """
    # versions : The version numbers of one or more launch template versions to delete.
    (string)
    """
    add_option_dict = {}
    #######################################################################
    # parameter display string
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegate to the shared helper: prompt for `versions`, then run
    # `aws ec2 delete-launch-template-versions`.
    write_one_parameter("ec2", "delete-launch-template-versions", "versions", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
57bd1d895445c2982d60ae8f2bcdf3f21b6857bb
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/database/exec_command_vo.py
|
dc9926f6c0697fd34e623775fb3e01dffcfd0a23
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
#!/usr/bin/python
class ExecCommandVo:
    """Value object wrapping one row of the exec-command lookup table.

    The positional row layout must match the column order returned by
    get_select_query().
    """

    # Column defaults; every one is overwritten in __init__.
    id = 0
    name = ""
    command_name = ""
    parameter_num = 0
    jmespath = ""
    is_output = False
    type = ""
    execService_id = 0
    description = ""
    require_parameters = ""

    def __init__(self, row):
        """Populate the VO from a positional DB row."""
        # Plain string columns.
        self.name, self.command_name = row[1], row[2]
        self.jmespath, self.type = row[4], row[6]
        self.description, self.require_parameters = row[8], row[9]
        # Columns needing a type conversion.
        self.id, self.parameter_num = int(row[0]), int(row[3])
        self.is_output = bool(row[5])
        self.execService_id = int(row[7])

    @classmethod
    def get_select_query(cls):
        """Column list, in the positional order expected by __init__."""
        return ("id, name, command_name, parameter_num, jmespath, "
                "is_output, type, execService_id, description, require_parameters")

    def getName(self):
        """Return the command's display name."""
        return self.name

    def getId(self):
        """Return the numeric row id."""
        return self.id

    def getExecServiceId(self):
        """Return the id of the owning exec service."""
        return self.execService_id

    def getType(self):
        """Return the type column."""
        return self.type

    def getCommandName(self):
        """Return the CLI command name."""
        return self.command_name

    def getParameterNum(self):
        """Return the number of parameters."""
        return self.parameter_num

    def getJmespath(self):
        """Return the JMESPath expression for output filtering."""
        return self.jmespath

    def getDescription(self):
        """Return the description column."""
        return self.description

    def getRequireParameters(self):
        """Return the required-parameters column."""
        return self.require_parameters

    def __str__(self):
        summary = (self.id, self.name, self.command_name, self.parameter_num,
                   self.is_output, self.type, self.execService_id)
        return "{} => {} {} {} {} {} {}".format(*summary)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
79489b9bd967a1add2a9b89db6a08cda65f6fa10
|
9889e7fd73314382fb2f9e8f63d92cf3254b75fb
|
/ThirdParty/Peridigm/test/verification/Compression_3x1x1_InfluenceFunction/np3/Compression_3x1x1_InfluenceFunction.py
|
d5676e6cfe92fcf1d1b4d2eb8cd6139f5825624c
|
[] |
no_license
|
bbanerjee/ParSim
|
0b05f43cff8e878658dc179b4a604eabd873f594
|
87f87816b146f40013a5e6648dfe20f6d2d002bb
|
refs/heads/master
| 2023-04-27T11:30:36.252023
| 2023-04-13T22:04:50
| 2023-04-13T22:04:50
| 13,608,512
| 16
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
#! /usr/bin/env python
import sys
import os
import re
from subprocess import Popen

# Regression test driver: run Peridigm on 3 MPI ranks, merge the per-rank
# Exodus output with epu, then diff it against the gold file with exodiff.
test_dir = "Compression_3x1x1_InfluenceFunction/np3"
base_name = "Compression_3x1x1_InfluenceFunction"

if __name__ == "__main__":
    result = 0
    # log file will be dumped if verbose option is given
    verbose = False
    if "-verbose" in sys.argv:
        verbose = True
    # change to the specified test directory
    os.chdir(test_dir)
    # open log file
    log_file_name = base_name + ".log"
    if os.path.exists(log_file_name):
        os.remove(log_file_name)
    logfile = open(log_file_name, 'w')
    # remove old output files, if any
    # use regular expression module since differentiating
    # between gold files and old output files can be tricky
    # NOTE(review): `in` here is a substring test against "<base>.e", so only
    # file names that are substrings of that string are removed — confirm
    # this matches the intended cleanup.
    files_to_remove = base_name + ".e"
    for file in os.listdir(os.getcwd()):
        if file in files_to_remove:
            os.remove(file)
    # run Peridigm
    command = ["mpiexec", "-np", "3", "../../../../src/Peridigm", "../"+base_name+".xml"]
    p = Popen(command, stdout=logfile, stderr=logfile)
    return_code = p.wait()
    if return_code != 0:
        result = return_code
    # compare output files against gold files
    command = ["../../../../scripts/epu", "-p", "3", base_name]
    p = Popen(command, stdout=logfile, stderr=logfile)
    return_code = p.wait()
    if return_code != 0:
        result = return_code
    command = ["../../../../scripts/exodiff", \
               "-stat", \
               "-f", \
               "../"+base_name+".comp", \
               base_name+".e", \
               "../"+base_name+"_gold.e"]
    p = Popen(command, stdout=logfile, stderr=logfile)
    return_code = p.wait()
    if return_code != 0:
        result = return_code
    logfile.close()
    # dump the output if the user requested verbose
    if verbose == True:
        os.system("cat " + log_file_name)
    # Exit with the last nonzero return code seen (0 on full success).
    sys.exit(result)
|
[
"b.banerjee.nz@gmail.com"
] |
b.banerjee.nz@gmail.com
|
b1e479091df62ec24625058db05cbe6459f76496
|
a222e2999251ba7f0d62c428ba8cc170b6d0b3b7
|
/Company_Con_past/DDCC_2016_Qualifing/C.py
|
bcf410cbd0806e1ff63d727adc1bd31342b6811b
|
[
"MIT"
] |
permissive
|
yosho-18/AtCoder
|
3e1f3070c5eb44f154c8104fbd5449f47446ce14
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
refs/heads/master
| 2020-06-02T10:21:29.458365
| 2020-05-29T12:40:48
| 2020-05-29T12:40:48
| 188,795,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,609
|
py
|
import sys
# import math, string, itertools, fractions, heapq, collections, re, array, bisect, copy, functools, random
# from collections import deque, defaultdict, Counter; from heapq import heappush, heappop
# from itertools import permutations, combinations, product, accumulate, groupby
# from bisect import bisect_left, bisect_right, insort_left, insort_right
# from operator import itemgetter as ig
sys.setrecursionlimit(10 ** 7)
# Contest-template scratch globals (most are unused by this solution).
inf = 10 ** 20; INF = float("INF"); ans = 0; tmp = 0; ansli = []; tmpli = []; candili = []; mod = 10 ** 9 + 7
# dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]; ddn = dd + [(-1, 1), (1, 1), (1, -1), (-1, -1)]; ddn9 = ddn + [(0, 0)]
"""for dx, dy in dd:
    nx = j + dx; ny = i + dy
    if 0 <= nx < w and 0 <= ny < h:"""
# Input helpers: w* read one whitespace-split line, h* read n lines,
# m* read an n-line matrix; *p variants shift values to 0-based.
def wi(): return list(map(int, sys.stdin.readline().split()))
def wip(): return [int(x) - 1 for x in sys.stdin.readline().split()]#WideIntPoint
def ws(): return sys.stdin.readline().split()
def i(): return int(sys.stdin.readline())
def s(): return input()
def hi(n): return [i() for _ in range(n)]
def hs(n): return [s() for _ in range(n)]#HeightString
def mi(n): return [wi() for _ in range(n)]#MatrixInt
def mip(n): return [wip() for _ in range(n)]
def ms(n): return [ws() for _ in range(n)]

# Read n values with capacity c, then greedily pair the largest remaining
# element with the smallest (K[k]) while their combined size fits in c.
n, c = wi()
L = hi(n)
L.sort()
K = sorted(L)  # NOTE(review): sorted copy of the already-sorted L
k = 0
while True:
    # Scan l from the largest index down; `ans` counts groups, `k` advances
    # past small elements that were packed together with a large one.
    for l in range(len(L) - 1, -1, -1):
        if k == l:
            ans += 1
            print(ans)
            exit()
        if k > l:
            print(ans)
            exit()
        if L[l] + K[k] + 1 <= c:
            ans += 1
            k += 1
        else:
            ans += 1
|
[
"44283410+wato18@users.noreply.github.com"
] |
44283410+wato18@users.noreply.github.com
|
8ecd323ed8eab418c79c9efc15c63141b501ee8c
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/Zprime/ZprimeToBBbar_M_500_TuneCUETP8M1_13TeV_pythia8_cfi.py
|
b784115f04b91be013aae77247be97e4c644b5bc
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
import FWCore.ParameterSet.Config as cms

from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *

# Pythia8 generator fragment: Z' -> b bbar at m(Z') = 500 GeV, 13 TeV
# collisions, CUETP8M1 tune.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),
    crossSection = cms.untracked.double(5.511e-5),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'NewGaugeBoson:ffbar2gmZZprime = on',
            'Zprime:gmZmode = 3', # only pure Z' contribution
            '32:m0 = 500',
            '32:onMode = off', # switch off all of the Z' decay
            '32:onIfAny = 5', # switch on the Z'->BBbar
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters')
    )
)

ProductionFilterSequence = cms.Sequence(generator)
|
[
"ksung@cern.ch"
] |
ksung@cern.ch
|
15951dadb81a218712916a676485e8860d3655e9
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2635/60636/251230.py
|
272bd556cb478f51d8723a20d1005270dd20cb0d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
def f(x):
    """Return the number of trailing zeros of x!.

    Counts the total exponent of 2 and of 5 across 1..x; the number of
    trailing zeros of x! is the smaller of the two.
    """
    count_2 = 0
    count_5 = 0
    for i in range(1, x + 1):
        a = i
        while a % 2 == 0:
            # Floor division keeps `a` an int; the original float `/` loses
            # precision for large values.
            a //= 2
            count_2 = count_2 + 1
        b = i
        while b % 5 == 0:
            b //= 5
            count_5 = count_5 + 1
    return min(count_2, count_5)
res=[]
k=int(input())
i=0
# NOTE(review): `res` is never appended to, so print(res) always shows []
# and print(len(res)) prints 0; the loop also prints for every i whose
# f(i) equals k. This looks unfinished — verify against the judge's
# expected output before relying on it.
while(True):
    if(f(i)==k):
        print(res)
    elif(f(i)>k):
        break
    i=i+1
print(len(res))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
05a420e0ca727d9c90a5299c4100aa1377a32dac
|
35b6013c1943f37d1428afd2663c8aba0a02628d
|
/monitoring/opencensus/main.py
|
0330f1fd136e2e2ce6687901f2b6708829c58ee9
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/python-docs-samples
|
d2a251805fbeab15d76ed995cf200727f63f887d
|
44e819e713c3885e38c99c16dc73b7d7478acfe8
|
refs/heads/main
| 2023-08-28T12:52:01.712293
| 2023-08-28T11:18:28
| 2023-08-28T11:18:28
| 35,065,876
| 7,035
| 7,593
|
Apache-2.0
| 2023-09-14T20:20:56
| 2015-05-04T23:26:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,753
|
py
|
"""
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import random
import time
# [START monitoring_sli_metrics_opencensus_setup]
from flask import Flask
from opencensus.ext.prometheus import stats_exporter as prometheus
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module
from prometheus_flask_exporter import PrometheusMetrics
# [END monitoring_sli_metrics_opencensus_setup]
# set up measures
# [START monitoring_sli_metrics_opencensus_measure]
m_request_count = measure_module.MeasureInt(
"python_request_count", "total requests", "requests"
)
m_failed_request_count = measure_module.MeasureInt(
"python_failed_request_count", "failed requests", "requests"
)
m_response_latency = measure_module.MeasureFloat(
"python_response_latency", "response latency", "s"
)
# [END monitoring_sli_metrics_opencensus_measure]
# set up stats recorder
stats_recorder = stats_module.stats.stats_recorder
# [START monitoring_sli_metrics_opencensus_view]
# set up views
latency_view = view_module.View(
"python_response_latency",
"The distribution of the latencies",
[],
m_response_latency,
aggregation_module.DistributionAggregation(
[0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
),
)
request_count_view = view_module.View(
"python_request_count",
"total requests",
[],
m_request_count,
aggregation_module.CountAggregation(),
)
failed_request_count_view = view_module.View(
"python_failed_request_count",
"failed requests",
[],
m_failed_request_count,
aggregation_module.CountAggregation(),
)
# register views
def register_all_views(view_manager: stats_module.stats.view_manager) -> None:
view_manager.register_view(latency_view)
view_manager.register_view(request_count_view)
view_manager.register_view(failed_request_count_view)
# [END monitoring_sli_metrics_opencensus_view]
# set up exporter
# [START monitoring_sli_metrics_opencensus_exporter]
def setup_openCensus_and_prometheus_exporter() -> None:
stats = stats_module.stats
view_manager = stats.view_manager
exporter = prometheus.new_stats_exporter(prometheus.Options(namespace="oc_python"))
view_manager.register_exporter(exporter)
register_all_views(view_manager)
# [END monitoring_sli_metrics_opencensus_exporter]
app = Flask(__name__)
metrics = PrometheusMetrics(app)
@app.route("/")
def homePage() -> (str, int):
# start timer
# [START monitoring_sli_metrics_opencensus_latency]
start_time = time.perf_counter()
# [START monitoring_sli_metrics_opencensus_counts]
mmap = stats_recorder.new_measurement_map()
# [END monitoring_sli_metrics_opencensus_latency]
# count request
mmap.measure_int_put(m_request_count, 1)
# fail 10% of the time
# [START monitoring_sli_metrics_opencensus_latency]
if random.randint(0, 100) > 90:
# [END monitoring_sli_metrics_opencensus_latency]
mmap.measure_int_put(m_failed_request_count, 1)
# [END monitoring_sli_metrics_opencensus_counts]
# [START monitoring_sli_metrics_opencensus_latency]
response_latency = time.perf_counter() - start_time
mmap.measure_float_put(m_response_latency, response_latency)
# [START monitoring_sli_metrics_opencensus_counts]
tmap = tag_map_module.TagMap()
mmap.record(tmap)
# [END monitoring_sli_metrics_opencensus_latency]
return ("error!", 500)
# [END monitoring_sli_metrics_opencensus_counts]
else:
random_delay = random.randint(0, 5000) / 1000
# delay for a bit to vary latency measurement
time.sleep(random_delay)
# record latency
response_latency = time.perf_counter() - start_time
mmap.measure_float_put(m_response_latency, response_latency)
tmap = tag_map_module.TagMap()
mmap.record(tmap)
return ("home page", 200)
if __name__ == "__main__":
setup_openCensus_and_prometheus_exporter()
app.run(port=8080)
|
[
"noreply@github.com"
] |
GoogleCloudPlatform.noreply@github.com
|
37aa35cf0f329661e957df6318327be084e1a169
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/client/gui/shared/fortifications/fort_listener.py
|
32a43db5d6dcb68cebf92ebad77403b2096c27e1
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293
| 2016-11-19T19:12:37
| 2016-11-19T19:12:37
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,594
|
py
|
# 2016.11.19 19:52:40 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/fortifications/fort_listener.py
from gui.shared.ClanCache import g_clanCache
from gui.shared.fortifications import interfaces
class fortProviderProperty(property):
def __get__(self, obj, objType = None):
return g_clanCache.fortProvider
class fortCtrlProperty(property):
def __get__(self, obj, objType = None):
provider = g_clanCache.fortProvider
ctrl = None
if provider:
ctrl = provider.getController()
return ctrl
class fortStateProperty(property):
def __get__(self, obj, objType = None):
provider = g_clanCache.fortProvider
state = None
if provider:
state = provider.getState()
return state
class FortListener(interfaces.IFortListener):
@fortProviderProperty
def fortProvider(self):
return None
@fortCtrlProperty
def fortCtrl(self):
return interfaces.IFortController()
@fortStateProperty
def fortState(self):
return None
def startFortListening(self):
provider = self.fortProvider
if provider:
provider.addListener(self)
def stopFortListening(self):
provider = self.fortProvider
if provider:
provider.removeListener(self)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\fortifications\fort_listener.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:52:40 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
13659523e15ba66adc96db6742268b1e2ea4bd47
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/OpenGL/GLX/OML/sync_control.py
|
904b62014908c17354efdbca4c660238438fba59
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874
| 2023-07-22T21:48:01
| 2023-07-22T21:48:01
| 188,486,371
| 42
| 110
|
MIT
| 2022-11-20T09:47:56
| 2019-05-24T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 748
|
py
|
'''OpenGL extension OML.sync_control
This module customises the behaviour of the
OpenGL.raw.GLX.OML.sync_control to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OML/sync_control.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.OML.sync_control import *
from OpenGL.raw.GLX.OML.sync_control import _EXTENSION_NAME
def glInitSyncControlOML():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
93a7e272213ba1df834a42e3302f2ce4cc6c579b
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/programa-stic_barf-project/barf-project-master/barf/core/dbg/gdbdebugger.py
|
911aad2625d2131efb29b070c98b6fbfe34d3b30
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""GDB Debugger Interface.
"""
import gdb
from pybfd.bfd import Bfd
from barf.core.bi import Memory
from barf.core.dbg.debugger import Debugger
# TODO: See how to get this information from gdb.
def get_section_text_limits(filename):
"""Get setion .text limits.
"""
bfd = Bfd(filename)
section_name = ".text"
section = bfd.sections.get(section_name)
section_start = section.vma
section_end = section.vma + len(section.content) - 1
bfd.close()
return section_start, section_end
class GDBDebugger(Debugger):
"""GDB Debugger Interface.
"""
def __init__(self):
super(GDBDebugger, self).__init__()
def get_memory(self):
"""Get memory.
"""
inf = gdb.selected_inferior()
return Memory(inf.read_memory, inf.write_memory)
def get_architecture(self):
"""Get architecture.
"""
return "x86"
def get_registers(self):
"""Get registers.
"""
registers32 = ["eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
"eip"]
regs = {}
for reg in registers32:
regs[reg] = int(long(gdb.parse_and_eval("$" + reg)) & 0xffffffff)
return regs
def get_flags(self):
"""Get flags.
"""
flags32 = ["af", "cf", "of", "pf", "sf", "zf"]
eflags = str(gdb.parse_and_eval("$eflags"))[2:-2].lower().split(" ")
flags = {}
for flag in flags32:
if flag in eflags:
flags[flag] = 0x1
else:
flags[flag] = 0x0
return flags
def get_current_file(self):
"""Get current file name.
"""
return gdb.current_progspace().filename
def get_section_text_limits(self):
"""Get section .text limits.
"""
text, start, end = get_section_text(self.get_current_file())
self._section_text = text
self._section_text_start = start
self._section_text_end = end
return self._section_text_start, self._section_text_end
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
a59cece9c7251f5a52b3709ee9e785dceec4b697
|
2923b9f58e6a143a3e070169612165585c301def
|
/LA/gp_rupture_test/LA/gp_rupture_test/gp_021219_Scott_7.35_noplas_GPU_2hz/plot_pgv.py
|
75c4de52fe39fe8ad4f2230c474f0b4f9aea77a9
|
[] |
no_license
|
hzfmer/summit_work_021421
|
16536dd716519bc9244da60007b9061ef5403429
|
6981b359fefb2af22e0bea6c47511de16cad22bd
|
refs/heads/master
| 2023-03-11T15:34:36.418971
| 2021-02-05T23:22:10
| 2021-02-05T23:22:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,962
|
py
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import os
import re
def discrete_cmap(N, base_cmap=None):
"""Create an N-bin discrete colormap from the specified input map"""
# Note that if base_cmap is a string or None, you can simply do
# return plt.cm.get_cmap(base_cmap, N)
# The following works for string, None, or a colormap instance:
base = plt.cm.get_cmap(base_cmap)
color_list = base(np.linspace(0, 1, N))
cmap_name = base.name + str(N)
return base.from_list(cmap_name, color_list, N)
M = re.findall("\d+\.\d+", os.getcwd().split('/')[-1])[0]
params = {'6.35': (354, 212), '7.35': (980, 240), '8.45': (5116, 220)}
nx, ny = 6320, 4200
nxf, nzf = params[M]
dx = 0.1
with open('cities_name.txt', 'r') as f_name:
cities_name = f_name.readlines()
cities_idx = dx * np.loadtxt('cities.idx', dtype='int')
vx = np.fromfile('./peak_velocity_H_01.0Hz.bin', dtype='float32').reshape((ny, nx))
vy = np.fromfile('./peak_velocity_Z_01.0Hz.bin', dtype='float32').reshape((ny, nx))
vx = np.flip(vx, 0)
vy = np.flip(vy, 0)
trace = np.fromfile('./fault_idx.bin', dtype='int32').reshape((nzf, nxf, 2))
fig, ax = plt.subplots(figsize=(6,6))
c = ax.imshow(vx, extent=[0, nx * dx, 0, ny * dx], cmap=discrete_cmap(20, 'RdBu_r'),
norm=LogNorm(vmin=0.01, vmax=3))
# c2 = ax[1].imshow(vy, extent=[0, nx * dx, 0, ny * dx], cmap='RdBu_r', vmax=10)
ax.scatter(trace[0, :, 0] * dx, trace[0, :, 1] * dx, 50, 'g', marker='*')
cb = plt.colorbar(c, ax=ax, format='%.2f', label='PGV (m/s)', orientation='horizontal')
ax.scatter(cities_idx[:, 0], cities_idx[:, 1], 20, 'w', marker='^')
for i in range(len(cities_name)):
ax.text(cities_idx[i, 0] - 45, cities_idx[i, 1] - 20, cities_name[i].strip('\n'), color='b')
plt.tight_layout()
ax.set_xlabel('X (km)')
ax.set_ylabel('Y (km)')
fig.savefig(f"PGV_with_trace_{M}.png", dpi=600, bbox_inches='tight', pad_inches=0.05)
|
[
"hzfmer94@gmail.com"
] |
hzfmer94@gmail.com
|
60b05258a7af383e9669549c72215b9efe7aec78
|
aaa762ce46fa0347cdff67464f56678ea932066d
|
/AppServer/lib/django-1.5/tests/regressiontests/model_formsets_regress/models.py
|
f94ad51929420a9df6641b04d954802d88f6f8ba
|
[
"BSD-3-Clause",
"LGPL-2.1-or-later",
"Apache-2.0",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
] |
permissive
|
obino/appscale
|
3c8a9d8b45a6c889f7f44ef307a627c9a79794f8
|
be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f
|
refs/heads/master
| 2022-10-01T05:23:00.836840
| 2019-10-15T18:19:38
| 2019-10-15T18:19:38
| 16,622,826
| 1
| 0
|
Apache-2.0
| 2022-09-23T22:56:17
| 2014-02-07T18:04:12
|
Python
|
UTF-8
|
Python
| false
| false
| 829
|
py
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class User(models.Model):
username = models.CharField(max_length=12, unique=True)
serial = models.IntegerField()
class UserSite(models.Model):
user = models.ForeignKey(User, to_field="username")
data = models.IntegerField()
class Place(models.Model):
name = models.CharField(max_length=50)
class Restaurant(Place):
pass
class Manager(models.Model):
retaurant = models.ForeignKey(Restaurant)
name = models.CharField(max_length=50)
class Network(models.Model):
name = models.CharField(max_length=15)
@python_2_unicode_compatible
class Host(models.Model):
network = models.ForeignKey(Network)
hostname = models.CharField(max_length=25)
def __str__(self):
return self.hostname
|
[
"root@lucid64.hsd1.ca.comcast.net"
] |
root@lucid64.hsd1.ca.comcast.net
|
7a3116c0966d70d7ce49d8c5048be8540ad7cea9
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_Class2098.py
|
6d4cc286b7cbe8fb8b857dd6c11383572739cef7
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,072
|
py
|
# qubit number=4
# total number=34
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=20
prog.cz(input_qubit[0],input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=31
prog.x(input_qubit[3]) # number=32
prog.cx(input_qubit[0],input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.h(input_qubit[3]) # number=25
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.y(input_qubit[2]) # number=18
prog.z(input_qubit[3]) # number=28
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.h(input_qubit[1]) # number=19
prog.h(input_qubit[0]) # number=15
prog.cz(input_qubit[2],input_qubit[0]) # number=16
prog.h(input_qubit[0]) # number=17
prog.y(input_qubit[1]) # number=26
prog.y(input_qubit[1]) # number=27
prog.swap(input_qubit[1],input_qubit[0]) # number=29
prog.swap(input_qubit[1],input_qubit[0]) # number=30
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2098.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
b8ee7d84333a2089a26b2d3ee0588cefa285e7f4
|
5989e503a733e8b29f4c502008446a75c2b43ff8
|
/src/geofr/forms/forms.py
|
3c0372717e90babdab4621efae6ea4bf114d1570
|
[] |
no_license
|
samuelpath/aides-territoires
|
399a6a7b0607ef5a8d2b327247446b239f5b1a42
|
5793bd49d7157a34e08c29e56a46e1e3ead0651f
|
refs/heads/master
| 2022-12-20T14:35:18.671563
| 2020-08-21T08:00:33
| 2020-08-21T08:00:33
| 288,424,578
| 0
| 0
| null | 2020-08-18T10:27:17
| 2020-08-18T10:27:16
| null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
from django import forms
from django.contrib import admin
from django.contrib.admin.widgets import AutocompleteSelectMultiple
from django.utils.translation import ugettext_lazy as _
# from core.forms import AutocompleteSelectMultiple
from geofr.models import Perimeter
class PerimeterUploadForm(forms.Form):
city_list = forms.FileField(
label=_('City list'),
required=True)
class PerimeterCombineForm(forms.Form):
add_perimeters = forms.ModelMultipleChoiceField(
label=_('Perimeters to add'),
queryset=Perimeter.objects.all(),
widget=AutocompleteSelectMultiple(Perimeter._meta, admin.AdminSite()),
help_text=_('Select a list of perimeters to combines'))
rm_perimeters = forms.ModelMultipleChoiceField(
label=_('Perimeters to substract'),
required=False,
queryset=Perimeter.objects.all(),
widget=AutocompleteSelectMultiple(Perimeter._meta, admin.AdminSite()),
help_text=_('Those perimeters will be substracted from the '
'combined perimeters'))
|
[
"thibault@miximum.fr"
] |
thibault@miximum.fr
|
5f0bfc67ef67c011e059a43bcfd8dc2ead2c0091
|
6d233ad2059a941e4ce4c5b5ee3857b8a4a0d212
|
/Everyday_alg/2022/2022-01/01/convert-1d-array-into-2d-array.py
|
8ece2df0c75c81071c9834f30b59c191f075b716
|
[] |
no_license
|
Alexanderklau/Algorithm
|
7c38af7debbe850dfc7b99cdadbf0f8f89141fc6
|
eac05f637a55bfcc342fa9fc4af4e2dd4156ea43
|
refs/heads/master
| 2022-06-12T21:07:23.635224
| 2022-06-12T08:12:07
| 2022-06-12T08:12:07
| 83,501,915
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
# coding: utf-8
__author__ = 'Yemilice_lau'
"""
给你一个下标从 0 开始的一维整数数组 original 和两个整数 m 和 n 。
你需要使用 original 中 所有 元素创建一个 m 行 n 列的二维数组。
original 中下标从 0 到 n - 1 (都 包含 )的元素构成二维数组的第一行,
下标从 n 到 2 * n - 1 (都 包含 )的元素构成二维数组的第二行,依此类推。
请你根据上述过程返回一个 m x n 的二维数组。如果无法构成这样的二维数组,请你返回一个空的二维数组。
输入:original = [1,2,3,4], m = 2, n = 2
输出:[[1,2],[3,4]]
解释:
构造出的二维数组应该包含 2 行 2 列。
original 中第一个 n=2 的部分为 [1,2] ,构成二维数组的第一行。
original 中第二个 n=2 的部分为 [3,4] ,构成二维数组的第二行。
输入:original = [3], m = 1, n = 2
输出:[]
解释:
original 中只有 1 个元素。
无法将 1 个元素放满一个 1x2 的二维数组,所以返回一个空的二维数组。
"""
class Solution(object):
def construct2DArray(self, original, m, n):
"""
:type original: List[int]
:type m: int
:type n: int
:rtype: List[List[int]]
"""
if len(original) != (m * n):
return []
res = []
for i in range(0, len(original), n):
res.append(original[i:i+n])
return res
|
[
"429095816@qq.com"
] |
429095816@qq.com
|
a326515df2fc5497a6a153866ca631b50686fee8
|
64188fd40699e5f542c12190f0892b082d5d643b
|
/src/problem_63/solution.py
|
31314c0f53cc7a557a6306dacf386548b0d7dd4f
|
[] |
no_license
|
tyronedamasceno/Daily-Coding-Problem
|
9b7cd7a0f19762854986f5ab858a110353e81410
|
a34bebbe635c4a7c8fb7400ea11fd03e4c2dea4b
|
refs/heads/master
| 2020-04-30T00:51:18.020367
| 2019-06-11T18:47:11
| 2019-06-11T18:47:11
| 176,513,062
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
"""
Given a 2D matrix of characters and a target word, write a function that
returns whether the word can be found in the matrix by going left-to-right,
or up-to-down.
For example, given the following matrix:
[['F', 'A', 'C', 'I'],
['O', 'B', 'Q', 'P'],
['A', 'N', 'O', 'B'],
['M', 'A', 'S', 'S']]
and the target word 'FOAM', you should return true, since it's the leftmost
column. Similarly, given the target word 'MASS', you should return true,
since it's the last row.
"""
import unittest
from copy import deepcopy
def solve(matrix, word):
transp = deepcopy(matrix)
for i in range(len(matrix)):
for j in range(len(matrix)):
transp[i][j] = matrix[j][i]
for line in matrix:
if word in ''.join(line):
return True
for line in transp:
if word in ''.join(line):
return True
return False
class Tests(unittest.TestCase):
def test_example1(self):
matrix = [
['F', 'A', 'C', 'I'],
['O', 'B', 'Q', 'P'],
['A', 'N', 'O', 'B'],
['M', 'A', 'S', 'S']
]
self.assertTrue(solve(matrix, 'FOAM'))
def test_example2(self):
matrix = [
['F', 'A', 'C', 'I'],
['O', 'B', 'Q', 'P'],
['A', 'N', 'O', 'B'],
['M', 'A', 'S', 'S']
]
self.assertTrue(solve(matrix, 'MASS'))
|
[
"tyronedamasceno@gmail.com"
] |
tyronedamasceno@gmail.com
|
21fe9fffee018e8624644005eda4caea17cd4fd1
|
471b8043bedd60f73b532b5bd23aa19a32cb7d44
|
/where/models/site/nt_atm_loading.py
|
9637c4a6106b723c07da794f4b2e619a8b47a87a
|
[
"MIT"
] |
permissive
|
gahjelle/where
|
8be56137001eed20c55413549bbf43a5978b918c
|
f7d025c4fb36adb3e511c3a61b244263bbfd000d
|
refs/heads/master
| 2021-06-04T05:02:16.596468
| 2020-06-24T08:52:27
| 2020-06-24T08:52:27
| 136,427,535
| 0
| 0
|
MIT
| 2018-06-07T05:45:24
| 2018-06-07T05:45:23
| null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
"""Apply non tidal atmospheric loading displacements
Description:
------------
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import plugins
# Where imports
from where import apriori
from where.data import position
@plugins.register
def non_tidal_atmospheric_loading(dset):
"""Apply non tidal atmospheric loading displacements at all stations.
Corrections are returned in meters in the Geocentric Celestial Reference System for each
observation. A Numpy array with 6 columns is returned, the first three columns are \f$ x, y, z \f$ for station 1,
while the last three columns are \f$ x, y, z \f$ for station 2.
Args:
dset: A Dataset containing model data.
Returns:
Numpy array: GCRS corrections in meters.
"""
ntapl = apriori.get("non_tidal_atmospheric_loading", time=dset.time)
data_out = list()
for _ in dset.for_each_suffix("station"):
data_out.append(non_tidal_atmospheric_loading_station(ntapl, dset))
return data_out
def non_tidal_atmospheric_loading_station(ntapl, dset):
"""Apply non tidal atmospheric loading displacements for a station field.
Corrections are returned in meters in the Geocentric
Celestial Reference System for each observation.
Args:
dset: A Dataset containing model data
Returns:
Numpy array: GCRS corrections in meters.
"""
lat, lon, _ = dset.site_pos.pos.llh.T
dup = ntapl["up"](dset.time, lon, lat)
deast = ntapl["east"](dset.time, lon, lat)
dnorth = ntapl["north"](dset.time, lon, lat)
denu = np.stack((deast, dnorth, dup), axis=1)
if position.is_position(dset.site_pos):
pos_correction = position.PositionDelta(denu, system="enu", ref_pos=dset.site_pos, time=dset.time)
elif position.is_posvel(dset.site_pos):
# set velocity to zero
denu = np.concatenate((denu, np.zeros(denu.shape)), axis=1)
pos_correction = position.PosVelDelta(denu, system="enu", ref_pos=dset.site_pos, time=dset.time)
else:
log.fatal(f"dset.site_pos{dset.default_field_suffix} is not a PositionArray or PosVelArray.")
return pos_correction.gcrs
|
[
"ask1982@yahoo.com"
] |
ask1982@yahoo.com
|
f84a1959d9738890de81d54ba34b897e6d9da60f
|
efe05b0ea0a11b50a42b81795b22b89724177180
|
/stratlib/sample_SMA_Live.py
|
b4e6ba3c014d13472410b233eb4a030b598b6027
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
dongtianqi1125/mooquant
|
b0fc5018f3aaf93ed69d8c249cccde7d8c98b8cb
|
244a87d4cd8b4d918eec4f16905e0921c3b39f50
|
refs/heads/master
| 2023-01-01T00:34:09.722476
| 2018-09-25T18:57:47
| 2018-09-25T18:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,161
|
py
|
from mooquant import bar, strategy
from mooquant.barfeed.tusharefeed import Feed
from mooquant.broker.backtesting import TradePercentage
from mooquant.broker.fillstrategy import DefaultStrategy
from mooquant.technical import cross, ma
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__instrument = instrument
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1, self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i - 1] > self.__ma3[-i - 1]:
m1 += 1
if self.__ma2[-i - 1] > self.__ma3[-i - 1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long
# position.
if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
print(bars[self.__instrument].getDateTime(),
bars[self.__instrument].getPrice())
# self.info("buy %s" % (bars.getDateTime()))
def runStratOnTushare(strat, paras, security_id, market, frequency):
liveFeed = Feed([security_id], frequency, 1024, 5)
strat = strat(liveFeed, security_id, *paras)
strat.run()
if __name__ == "__main__":
strat = thrSMA
security_id = '600848'
market = 'SH'
frequency = bar.Frequency.MINUTE
paras = [2, 20, 60, 10]
runStratOnTushare(strat, paras, security_id, market, frequency)
|
[
"travis@travis-ci.org"
] |
travis@travis-ci.org
|
14eefa052f00c116a9fc1c131e9f3b820d88dca3
|
94e06376dc265c7bf1a2e51acb9714d02b21503a
|
/scrapy项目/lianjia/lianjia/spiders/lianjia.py
|
d6bd34d3bf933a776b66ff1409338e227c37e0fd
|
[] |
no_license
|
zhangquanliang/python
|
4b2db32bed4e4746c8c49c309563f456dc41c6be
|
f45ef96e385b1cd6c5dfb53bf81042d953a9ec46
|
refs/heads/master
| 2021-04-26T23:30:12.217397
| 2019-03-20T06:18:14
| 2019-03-20T06:18:14
| 124,005,916
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,808
|
py
|
# -*- coding: utf-8 -*-
"""
Title = 深圳地区链家二手房
Date = 20180511
"""
import scrapy
import re
class LianjiaSpider(scrapy.Spider):
name = 'lianjia'
allowed_domains = ['sz.lianjia.com']
start_urls = ['https://sz.lianjia.com/ershoufang/']
def parse(self, response):
page_text = response.xpath("//div[@class='page-box house-lst-page-box']").extract_first()
page = re.findall('{"totalPage":(.*?),"curPage":1}', page_text, re.I | re.S)[0]
for i in range(1, int(page)+1):
url = 'https://sz.lianjia.com/ershoufang/pg{}/'.format(i)
yield scrapy.Request(url, callback=self.get_html)
def get_html(self, response):
from ..items import LianjiaItem
item = LianjiaItem()
title_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[1]/a/text()").extract()
houseIcon__ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[2]/div[1]/a/text()").extract()
houseIcon_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[2]/div[1]/text()").extract()
positionInfo__ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[3]/div[1]/text()").extract()
positionInfo_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[3]/div[1]/a/text()").extract()
totalPrice__ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[6]/div[1]/span/text()").extract()
totalPrice_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[6]/div[1]/text()").extract()
unitPrice_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[6]/div[2]/span/text()").extract()
tag_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[1]").extract()
house_url_ = response.xpath("//ul[@class='sellListContent']//li/a/@href").extract()
for i in range(len(house_url_)):
title = title_[i]
houseIcon = houseIcon__[i] + houseIcon_[i]
positionInfo = positionInfo__[i] + positionInfo_[i]
totalPrice = totalPrice__[i] + totalPrice_[i]
unitPrice = unitPrice_[i]
tag = ""
reg = re.findall('<span class=".*?">(.*?)</span>', str(tag_[i]))
for j in range(len(reg)):
tag += reg[j] + '-'
house_url = house_url_[i]
item['title'] = title
item['houseIcon'] = houseIcon
item['positionInfo'] = positionInfo
item['totalPrice'] = totalPrice
item['unitPrice'] = unitPrice
item['tag'] = tag
item['house_url'] = house_url
yield item
|
[
"1007228376@qq.com"
] |
1007228376@qq.com
|
586e4336d107436cf72073569215a306a9734851
|
ba7be04fa897785fb9255df3ece0c1ffbead6acc
|
/part4_project/apps/model/serializers.py
|
2f62d0a55a38aefe870cbd68391f031e2e8ee4c9
|
[] |
no_license
|
backupalisher/part4_project
|
e1f402553502d010ffe974ecce73e313f90b8174
|
09ca16e3021aeac609fe6594e5c4f6c72832d112
|
refs/heads/master
| 2022-12-10T13:22:15.899332
| 2020-09-21T16:22:02
| 2020-09-21T16:22:02
| 233,295,277
| 0
| 0
| null | 2022-12-08T10:47:45
| 2020-01-11T20:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 347
|
py
|
from rest_framework import serializers
from db_model.models import Models
class ModelsCreateSerialize(serializers.ModelSerializer):
    """Serializer for creating/detailing Models records.

    Exposes every field of the Models model and expands related
    objects one level deep (depth = 1).
    """

    class Meta:
        model = Models
        depth = 1
        fields = '__all__'
class ModelsListSerialize(serializers.ModelSerializer):
    """Lightweight serializer for list views: only id and name."""

    class Meta:
        model = Models
        fields = ('id', 'name')
|
[
"server.ares@gmail.com"
] |
server.ares@gmail.com
|
6d692836bd0bfc346bada188bc4423d0b09d1ba8
|
f5b5a6e3f844d849a05ff56c497638e607f940e0
|
/capitulo 08/08.20 - Programa 8.9 Validacao de inteiro usando funcao.py
|
0f9bb820f9ba6138690f0e16549b43e71a910b59
|
[] |
no_license
|
alexrogeriodj/Caixa-Eletronico-em-Python
|
9237fa2f7f8fab5f17b7dd008af215fb0aaed29f
|
96b5238437c88e89aed7a7b9c34b303e1e7d61e5
|
refs/heads/master
| 2020-09-06T21:47:36.169855
| 2019-11-09T00:22:14
| 2019-11-09T00:22:14
| 220,563,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 980
|
py
|
##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2019
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Terceira edição - Janeiro/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem3\capítulo 08\08.20 - Programa 8.9 – Validação de inteiro usando função.py
# Descrição: Programa 8.9 – Validação de inteiro usando função
##############################################################################
# Programa 8.9 – Validação de inteiro usando função
def faixa_int(pergunta, mínimo, máximo):
    """Prompt until the user enters an integer in [mínimo, máximo]; return it.

    Args:
        pergunta: prompt text shown to the user on every attempt.
        mínimo: smallest accepted value (inclusive).
        máximo: largest accepted value (inclusive).

    Returns:
        The first valid integer typed by the user.
    """
    while True:
        resposta = input(pergunta)
        try:
            v = int(resposta)
        except ValueError:
            # The original crashed with ValueError on non-numeric input,
            # contradicting the function's stated purpose (integer
            # validation); reject the input and ask again instead.
            print("Valor inválido. Digite um número inteiro.")
            continue
        if v < mínimo or v > máximo:
            print(f"Valor inválido. Digite um valor entre {mínimo} e {máximo}")
        else:
            return v
|
[
"noreply@github.com"
] |
alexrogeriodj.noreply@github.com
|
cd45d516fa4909d89c82ab685f869e71e01a120e
|
0a6b950b3022dc1afbc1243be572e3dbe5b8b619
|
/src/dirbs/api/v1/schemas/__init__.py
|
c87fca545e3c94cf892eb650a9697be31308d4f3
|
[] |
no_license
|
yasirz/DIRBS-Core
|
dee56bb350b9d511bbdca87afa41bc1f022e534a
|
ac26dc97c57216dc3c1fed1e1b17aac27d3a1a2d
|
refs/heads/master
| 2023-03-19T01:27:14.390299
| 2021-02-24T11:41:34
| 2021-02-24T11:41:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,219
|
py
|
"""
DIRBS REST-ful API-V1 schemas package.
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original
software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
|
[
"awkhan978@gmail.com"
] |
awkhan978@gmail.com
|
4e4d46192a0364c4540cd99fa5cf08106f7a70ac
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/gift/tasks/all_tasks.py
|
54650bf160cc8d90cd98fdb31716b5acf8b42044
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapping of all defined tasks in the project.
task name --> task class.
"""
from gift.tasks import task
# Single-environment tasks, keyed by task name.
ALL_SINGLE_ENV_TASKS = {
    'cls': task.ClassificationTask,
}
# TODO(samiraabnar): Refactor the desgin of the task and dataset classes.
# Multi-environment classification tasks (plain, IRM, and V-REx variants).
ALL_MULTI_ENV_TASKS = {
    'multi_env_cls': task.MultiEnvClassificationTask,
    'multi_env_irm_cls': task.MultiEnvIRMClassificationTask,
    'multi_env_vrex_cls': task.MultiEnvVRexClassificationTask,
}
# Multi-environment tasks that also map representations across domains
# (linear, nonlinear, Hungarian, identity, and Sinkhorn variants).
ALL_MULTI_ENV_WITH_REPS = {
    'multi_env_dm_cls':
        task.MultiEnvLinearDomainMappingClassification,
    'multi_env_nl_dm_cls':
        task.MultiEnvNonLinearDomainMappingClassification,
    'multi_env_hungarian_dm_cls':
        task.MultiEnvHungarianDomainMappingClassification,
    'multi_env_identity_dm_cls':
        task.MultiEnvIdentityDomainMappingClassification,
    'multi_env_sinkhorn_dm_cls':
        task.MultiEnvSinkhornDomainMappingClassification,
}
# Domain-adversarial (DANN) classification tasks.
ALL_MULTI_ENV_DOMAIN_ADVERSARIALS = {
    'multi_env_dann_cls': task.MultiEnvDannClassification
}
# Union of every registry above; looked up by get_task_class.
ALL_TASKS = {}
ALL_TASKS.update(ALL_SINGLE_ENV_TASKS)
ALL_TASKS.update(ALL_MULTI_ENV_TASKS)
ALL_TASKS.update(ALL_MULTI_ENV_WITH_REPS)
ALL_TASKS.update(ALL_MULTI_ENV_DOMAIN_ADVERSARIALS)
def get_task_class(task_name):
  """Maps a task name to its registered task class.

  Args:
    task_name: string; Name of the task (a key of ALL_TASKS).

  Returns:
    The task class registered under `task_name`.

  Raises:
    ValueError: If `task_name` is not a registered task.
  """
  # Membership test directly on the dict; `.keys()` was redundant.
  if task_name not in ALL_TASKS:
    raise ValueError(f'Unrecognized task: {task_name}')
  return ALL_TASKS[task_name]
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
cb9a2a0fca8de5d2ae056c8b91bac26518e8342a
|
caa7a39055c3451db43b39ffc5e70dc560749334
|
/manage.py
|
8abd1f05eae2546d4d038931d6197c57df4827c0
|
[] |
no_license
|
OneStage-NITW/website
|
da2438e3857c03a0c38fa6db6a33619b330a3e0d
|
af86e38560f16f70a0b74bcf2aeab4d855fbdc74
|
refs/heads/master
| 2016-08-12T15:17:14.577895
| 2015-05-31T18:10:52
| 2015-05-31T18:10:52
| 36,546,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
#!/usr/bin/env python
import os
import sys
def _main():
    """Configure Django settings and dispatch the command-line arguments."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "onestage_website.settings")
    # Imported lazily so the settings module is configured first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    _main()
|
[
"vivekhtc25@gmail.com"
] |
vivekhtc25@gmail.com
|
1a982b4c74818e20fa2025d49f804eda3fcc693d
|
70d39e4ee19154a62e8c82467ef75b601e584738
|
/devops/convert_pdf_to_csv.py
|
3c4fc08657b4068bd7a77779b52fc17cd0125452
|
[
"Apache-2.0"
] |
permissive
|
babywyrm/sysadmin
|
6f2724be13ae7e5b9372278856a8c072073beffb
|
2a5f3d29c7529bc917d4ff9be03af30ec23948a5
|
refs/heads/master
| 2023-08-16T03:50:38.717442
| 2023-08-16T03:05:55
| 2023-08-16T03:05:55
| 210,228,940
| 10
| 5
| null | 2023-05-01T23:15:31
| 2019-09-22T23:42:50
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 494
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
######################
########################
##
##
######################
##
"""
Created on Wed Apr 2 11:20:13 2019
@author: Ganesh
"""
from tabula import read_pdf
import pandas as pd
FILE_NAME="" # path of the input PDF (placeholder — must be set before running)
dest="" # destination CSV file name (placeholder — must be set before running)
# Extract the table from the PDF into a DataFrame.
# NOTE(review): newer tabula-py versions return a *list* of DataFrames from
# read_pdf; confirm the installed version before relying on .values below.
df= read_pdf(FILE_NAME)
# Rebuild the table using its first row as the header, dropping the first
# column, then write it out as a UTF-8 comma-separated file.
x=df.values
data=pd.DataFrame(data=x[1:,1:],columns=x[0,1:])
data.to_csv(dest,sep=',',encoding="utf-8")
#########################################################
|
[
"noreply@github.com"
] |
babywyrm.noreply@github.com
|
47857b06bfc66ba41572248fc6902862e04111c6
|
9d82f8f47c0f95fda1bda4edac9eeee52843cc58
|
/unet/unet_model.py
|
16b7cd50a40a050254b4ee1151174d0137685263
|
[] |
no_license
|
dodler/cvpr-autonomous-drivng
|
7e2df21e367efc2467a36827ebbafc6799ee56a3
|
3b1eb786ebe93809ffb46dda3b36f213c83c6e4b
|
refs/heads/master
| 2020-03-10T02:01:58.104341
| 2018-05-14T10:09:55
| 2018-05-14T10:09:55
| 129,125,863
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
#!/usr/bin/python
# full assembly of the sub-parts to form the complete net
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import RESIZE_TO
# python 3 confusing imports :(
from .unet_parts import *
class UNet(nn.Module):
    """U-Net assembled from the building blocks in unet_parts.

    Encoder: inconv then four down stages (64 -> 128 -> 256 -> 512 -> 512
    channels).  Decoder: four up stages fed with skip connections from the
    encoder, a final outconv to `n_classes` channels, and a sigmoid so the
    output can be read as per-pixel probabilities.
    """

    def __init__(self, n_channels, n_classes):
        super(UNet, self).__init__()
        self.inc = inconv(n_channels, 64)
        self.down1 = down(64, 128)
        self.down2 = down(128, 256)
        self.down3 = down(256, 512)
        self.down4 = down(512, 512)
        # Up blocks presumably take the concatenated skip + upsampled
        # channels as input (hence 1024 = 512 + 512) — defined in unet_parts,
        # not visible here; confirm there.
        self.up1 = up(1024, 256)
        self.up2 = up(512, 128)
        self.up3 = up(256, 64)
        self.up4 = up(128, 64)
        self.outc = outconv(64, n_classes)
        # self.upsample = torch.nn.Upsample(size=(RESIZE_TO, RESIZE_TO))
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # Encoder path; every intermediate is kept for the skip connections.
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # Decoder path: each up block receives the previous decoder output
        # plus the matching encoder feature map.
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.outc(x)
        # x = self.upsample(x)
        x = self.sigmoid(x)
        return x
|
[
"tntlagf93@mail.ru"
] |
tntlagf93@mail.ru
|
7563d88374abff5479905e76854c5bd67ca6684d
|
53c4460e8cce123276932b4ddf2fe00fdee75b65
|
/list01.py
|
a62881597c3786dcf0b170d5d4673825cd0ad52d
|
[] |
no_license
|
Yush1nk1m/Study_Python
|
5ba8a6eeb73184ea7f1e892daae182b78d265e06
|
516f0ba6d9411453fa0d2df00314e383e3f8cabb
|
refs/heads/master
| 2023-07-09T16:22:22.663219
| 2021-08-22T15:22:22
| 2021-08-22T15:22:22
| 398,831,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# Declare the lists.
list_a = [1, 2, 3]
list_b = [4, 5, 6]

# Print them out.
print('# 리스트')
print('list_a =', list_a)
print('list_b =', list_b)
print()

# Basic list operators: + concatenates, * repeats.
print('# 리스트 기본 연산자')
print('list_a + list_b =', list_a + list_b)
print('list_a * 3 =', list_a * 3)
print()

# Built-in functions: len() returns the number of elements.
print('# 길이 구하기')
print('len(list_a) =', len(list_a))
|
[
"kys010306@sogang.ac.kr"
] |
kys010306@sogang.ac.kr
|
ff35b942362d9ab3ae11cf91ad3d8c1b2aedaa61
|
1af611cc68a47bb81521d8517a9dbc71777bd401
|
/exchanges/hotbit/page_objects/exchange_page.py
|
9402c08f8171bc532ace1039b19d6c04baf17a3d
|
[] |
no_license
|
BTCDIRECT/crypto_exchanges_connectors
|
d4e3f879bedfd4f0a7b238d2696df8bf261c8868
|
19b852c19ddd197959d30a69ffb773b238e19a49
|
refs/heads/master
| 2022-04-12T21:04:01.580622
| 2020-03-10T16:22:01
| 2020-03-10T16:22:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
import os
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from exchanges.hotbit.page_objects.login_page import LoginPage
class ExchangePage(object):
    """Page object for a Hotbit trading-pair exchange page."""

    def __init__(self, base, quote, driver=None):
        # Pair URL, e.g. .../exchange?symbol=UBT/BTC for base='UBT', quote='BTC'.
        self.url = 'https://www.hotbit.io/exchange?symbol=' + base + '/' + quote
        if driver is not None:
            self.driver = driver
        else:
            # Fall back to a locally installed chromedriver binary.
            self.driver = webdriver.Chrome("/usr/lib/chromium-browser/chromedriver")

    def open(self):
        """Navigate the driver to the pair page."""
        self.driver.get(self.url)

    def click_login_button(self):
        """Click the login link and return a LoginPage page object.

        NOTE(review): find_element_by_css_selector raises
        NoSuchElementException rather than returning None when the element
        is absent, so the None check below looks dead — confirm against the
        selenium version in use.
        """
        login_button = self.driver.find_element_by_css_selector('a[href="/login"]')
        if login_button is not None:
            login_button.click()
        return LoginPage()
if __name__ == '__main__':
    # Manual smoke test: open the UBT/BTC pair page and go to the login page.
    ep = ExchangePage('UBT', 'BTC')
    ep.open()
    # Bug fix: the class defines click_login_button(), not login(); the old
    # ep.login() call raised AttributeError.
    ep.click_login_button()
|
[
"git@github.com"
] |
git@github.com
|
9b176b1706f27bc8f34005e2eaac0b741f9c37c0
|
18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9
|
/120hun.py
|
00a1fb5b064d0784c178ad4c9c9bda3831921983
|
[] |
no_license
|
mahakalai/mahak
|
05f96d52880ed7b2e5eb70dd1dbf14fc533236e8
|
613be9df7743ef59b1f0e07b7df987d29bb23ec7
|
refs/heads/master
| 2020-04-15T05:01:58.541930
| 2019-07-15T16:28:32
| 2019-07-15T16:28:32
| 164,406,486
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# Count index pairs (i, j), i < j, whose sum appears somewhere after j.
n = int(input())  # declared size; the list itself is read on the next line
values = list(map(int, input().split()))
count = 0
for i in range(len(values)):
    for j in range(i + 1, len(values)):
        if values[i] + values[j] in values[j + 1:]:
            count += 1
print(count)
|
[
"noreply@github.com"
] |
mahakalai.noreply@github.com
|
875e6987c32bb08c0ca9d0065e78ee5f2038ec49
|
92237641f61e9b35ff6af6294153a75074757bec
|
/Algorithm/programmers/lv2/lv2_정렬_가장 큰 수.py
|
3f2b9046ee8b6b04b7555edf6157aed221f10806
|
[] |
no_license
|
taepd/study
|
8ded115765c4f804813e255d9272b727bf41ec80
|
846d3f2a5a4100225b750f00f992a640e9287d9c
|
refs/heads/master
| 2023-03-08T13:56:57.366577
| 2022-05-08T15:24:35
| 2022-05-08T15:24:35
| 245,838,600
| 0
| 1
| null | 2023-03-05T23:54:41
| 2020-03-08T15:25:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
"""
문제 설명
0 또는 양의 정수가 주어졌을 때, 정수를 이어 붙여 만들 수 있는 가장 큰 수를 알아내 주세요.
예를 들어, 주어진 정수가 [6, 10, 2]라면 [6102, 6210, 1062, 1026, 2610, 2106]를 만들 수 있고, 이중 가장 큰 수는 6210입니다.
0 또는 양의 정수가 담긴 배열 numbers가 매개변수로 주어질 때, 순서를 재배치하여 만들 수 있는 가장 큰 수를 문자열로 바꾸어 return 하도록 solution 함수를 작성해주세요.
제한 사항
numbers의 길이는 1 이상 100,000 이하입니다.
numbers의 원소는 0 이상 1,000 이하입니다.
정답이 너무 클 수 있으니 문자열로 바꾸어 return 합니다.
입출력 예
numbers return
[6, 10, 2] 6210
[3, 30, 34, 5, 9] 9534330
[40, 403] 40403
[0, 0, 0, 0, 0] 0
[21, 212] 21221
"""
"""
1~6 테스트 케이스 실패하는데, 반례를 찾지 못함
가장 큰 수의 길이만큼 특정 수를 그 수의 첫 번째 수로 채워넣은 뒤, 이를 원래 수와 리스트 형식으로
짝지어주고, 이를 활용해 다중 우선순위 정렬 후, 원래 수를 추출하는 전략
"""
def solution(numbers):
    """Return the largest number formable by concatenating `numbers`, as a string.

    The original padding/multi-key sort failed several test cases (as its own
    comment admits); the reliable criterion is pairwise: place a before b
    exactly when the string a+b is greater than b+a.
    """
    from functools import cmp_to_key

    digits = [str(number) for number in numbers]

    def compare(a, b):
        # Negative -> a sorts first; we want the larger concatenation first.
        if a + b > b + a:
            return -1
        if a + b < b + a:
            return 1
        return 0

    digits.sort(key=cmp_to_key(compare))
    # int() collapses an all-zero result such as "000" to "0".
    return str(int(''.join(digits)))
"""
내가 생각한 것의 간결 명쾌한 버전..
"""
def solution(numbers):
    """Concatenate `numbers` into the largest possible value, as a string."""
    digits = [str(value) for value in numbers]
    # Repeating each string to a common length makes plain lexicographic
    # order agree with "which concatenation is larger" (values are <= 1000).
    digits.sort(key=lambda text: text * 3, reverse=True)
    # int() collapses an all-zero result such as "000" to "0".
    return str(int(''.join(digits)))
|
[
"taepd1@gmail.com"
] |
taepd1@gmail.com
|
e26085bdb8d7faee4611a6600b4dbc4c569bd91a
|
08f89d88585fee98118fa1d42b59519d13ecbf17
|
/tests/hwsim/vm/parallel-vm.py
|
bb79c44e69d9dcc78e36a4cbb6e42b2cefa3b277
|
[
"BSD-3-Clause"
] |
permissive
|
rzr/wpa_supplicant
|
eb982f7b7d192999332c712734f8b33df04657c6
|
3f7ac05878ba965e941f2b5b80b8cb744e63f506
|
refs/heads/master
| 2021-01-18T10:37:36.701639
| 2014-10-13T07:40:08
| 2014-10-13T09:24:17
| 31,964,766
| 0
| 0
|
NOASSERTION
| 2019-08-07T21:56:49
| 2015-03-10T15:19:59
|
C
|
UTF-8
|
Python
| false
| false
| 3,655
|
py
|
#!/usr/bin/env python2
#
# Parallel VM test case executor
# Copyright (c) 2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import fcntl
import os
import subprocess
import sys
import time
def main():
    """Split an hwsim test run across N VMs and aggregate their results.

    sys.argv[1] is the number of VMs; the remaining arguments are passed
    through to every ./vm-run.sh invocation.  Each VM runs slice i/N of
    the test set (--split); progress is polled from their stdout.
    """
    if len(sys.argv) < 2:
        sys.exit("Usage: %s <number of VMs> [params..]" % sys.argv[0])
    num_servers = int(sys.argv[1])
    if num_servers < 1:
        sys.exit("Too small number of VMs")
    timestamp = int(time.time())
    vm = {}
    for i in range(0, num_servers):
        print("\rStarting virtual machine {}/{}".format(i + 1, num_servers)),
        cmd = ['./vm-run.sh', '--ext', 'srv.%d' % (i + 1),
               '--split', '%d/%d' % (i + 1, num_servers)] + sys.argv[2:]
        vm[i] = {}
        vm[i]['proc'] = subprocess.Popen(cmd,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        vm[i]['out'] = ""
        vm[i]['err'] = ""
        vm[i]['pos'] = ""
        # Non-blocking reads so one quiet VM cannot stall polling the rest.
        for stream in [ vm[i]['proc'].stdout, vm[i]['proc'].stderr ]:
            fd = stream.fileno()
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    print
    # Poll once per second until every child has exited, tracking the third
    # field of each VM's most recent 'START ' line as its progress marker.
    while True:
        running = False
        updated = False
        for i in range(0, num_servers):
            if not vm[i]['proc']:
                continue
            if vm[i]['proc'].poll() is not None:
                # Process finished; mark it so it is skipped from now on.
                vm[i]['proc'] = None
                continue
            running = True
            try:
                err = vm[i]['proc'].stderr.read()
                vm[i]['err'] += err
            except:
                pass
            try:
                out = vm[i]['proc'].stdout.read()
            except:
                continue
            #print("VM {}: '{}'".format(i, out))
            vm[i]['out'] += out
            lines = vm[i]['out'].splitlines()
            last = [ l for l in lines if l.startswith('START ') ]
            if len(last) > 0:
                try:
                    pos = last[-1].split(' ')[2]
                    vm[i]['pos'] = pos
                    updated = True
                except:
                    pass
            else:
                vm[i]['pos'] = ''
        if not running:
            print("All VMs completed")
            break
        if updated:
            # Print a {vm-index: current-position} snapshot of progress.
            status = {}
            for i in range(0, num_servers):
                if not vm[i]['proc']:
                    continue
                status[i] = vm[i]['pos']
            print status
        time.sleep(1)
    # Dump the raw per-VM output into one timestamped combined log file.
    dir = '/tmp/hwsim-test-logs'
    try:
        os.mkdir(dir)
    except:
        pass
    with open('{}/{}-parallel.log'.format(dir, timestamp), 'w') as f:
        for i in range(0, num_servers):
            f.write('VM {}\n{}\n{}\n'.format(i, vm[i]['out'], vm[i]['err']))
    # Aggregate the result lines from every VM and print a summary.
    started = []
    passed = []
    failed = []
    skipped = []
    for i in range(0, num_servers):
        lines = vm[i]['out'].splitlines()
        started += [ l for l in lines if l.startswith('START ') ]
        passed += [ l for l in lines if l.startswith('PASS ') ]
        failed += [ l for l in lines if l.startswith('FAIL ') ]
        skipped += [ l for l in lines if l.startswith('SKIP ') ]
    if len(failed) > 0:
        print "Failed test cases:"
        for f in failed:
            print f.split(' ')[1],
        print
    print("TOTAL={} PASS={} FAIL={} SKIP={}".format(len(started), len(passed), len(failed), len(skipped)))

if __name__ == "__main__":
    main()
|
[
"j@w1.fi"
] |
j@w1.fi
|
4c09f85367aa3c9c504ea9f4d897a6d69a2e2cf1
|
09f0505f3ac1dccaf301c1e363423f38768cc3cc
|
/r_DailyProgrammer/Intermediate/C252/unittests/unittest.py
|
b51b46abe99ba600d2ab0f0d513db9d91e533c14
|
[] |
no_license
|
Awesome-Austin/PythonPractice
|
02212292b92814016d062f0fec1c990ebde21fe7
|
9a717f91d41122be6393f9fcd1a648c5e62314b3
|
refs/heads/master
| 2023-06-21T11:43:59.366064
| 2021-07-29T23:33:00
| 2021-07-29T23:33:00
| 270,854,302
| 0
| 0
| null | 2020-08-11T20:47:10
| 2020-06-08T23:24:09
|
Python
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
#! python3
import unittest
from r_DailyProgrammer.Intermediate.C252.unittests.test_values import TEST_VALUES
class MyTestCase(unittest.TestCase):
    """Placeholder test case.

    NOTE(review): assertEqual(True, False) always fails — this looks like
    unfinished template code; TEST_VALUES is imported at module level but
    never used here.
    """

    def test_something(self):
        self.assertEqual(True, False)


if __name__ == '__main__':
    unittest.main()
|
[
"{ID}+{username}@users.noreply.github.com"
] |
{ID}+{username}@users.noreply.github.com
|
35f3320e82f072e78322d8ce608a900689ed71ac
|
3be8b5d0334de1f3521dd5dfd8a58704fb8347f9
|
/web/app/djrq/admin/mistags.py
|
a50df1551d6208dca29018445d8fbadbb2266e09
|
[
"MIT"
] |
permissive
|
bmillham/djrq2
|
21a8cbc3087d7ad46087cd816892883cd276db7d
|
5f357b3951600a9aecbe6c50727891b1485df210
|
refs/heads/master
| 2023-07-07T01:07:35.093669
| 2023-06-26T05:21:33
| 2023-06-26T05:21:33
| 72,969,773
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
# encoding: utf-8
from ..templates.admin.mistags import mistagstemplate
from web.ext.acl import when
class Mistags:
    """Admin resource page for reviewing mistagged song requests."""

    # Dispatch configuration for the web framework's resource routing.
    __dispatch__ = 'resource'
    __resource__ = 'mistags'

    def __init__(self, context, name, *arg, **args):
        self._ctx = context
        self.queries = context.queries

    def get(self, *arg, **args):
        """Render the mistag list; a ?delete=<id> query removes one entry.

        Entries whose reported title/artist/album now match the song's
        library record are deleted before rendering — presumably they have
        already been corrected in the library (confirm intent with owner).
        """
        if 'delete' in args:
            self.queries.delete_mistag(args['delete'])
        mistaglist = self._ctx.queries.get_mistags()
        for r in mistaglist:
            if r.title == r.song.title and \
               r.artist == r.song.artist.fullname and \
               r.album == r.song.album.fullname:
                self._ctx.db.delete(r)
        # Single commit after the loop covers all deletions above.
        self._ctx.db.commit()
        return mistagstemplate("Mistags", self._ctx, mistaglist)
|
[
"bmillham@gmail.com"
] |
bmillham@gmail.com
|
34bd2374972707c758f836f39b7f58724ea233bf
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/cGqjxKhNqZPZ76zac_24.py
|
de7e6b8845e790236633048386370486dccaae13
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
"""
Remember the game Battleship? Ships are floating in a matrix. You have to fire
torpedos at their suspected coordinates, to try and hit them.
Create a function that takes a list of lists (matrix) and a coordinate as a
string. If the coordinate contains only water `"."`, return `"splash"` and if
the coordinate contains a ship `"*"`, return `"BOOM"`.
### Examples
[
[".", ".", ".", "*", "*"],
[".", "*", ".", ".", "."],
[".", "*", ".", ".", "."],
[".", "*", ".", ".", "."],
[".", ".", "*", "*", "."],
]
fire(matrix, "A1") ➞ "splash"
fire(matrix, "A4") ➞ "BOOM"
fire(matrix, "D2") ➞ "BOOM"
### Notes
* The provided matrix is always a square.
* The provided matrix will not be larger than 5 * 5 ( A1 * E5).
"""
def fire(m, c):
    """Return "splash" if coordinate `c` (e.g. "A4") in matrix `m` is water
    ".", otherwise "BOOM".

    The letter selects the row (A -> 0) and the digit selects the column
    (1-based in the coordinate, 0-based in the matrix).
    """
    row = ord(c[0]) - ord("A")
    col = int(c[1]) - 1
    return "splash" if m[row][col] == "." else "BOOM"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
306dde8ff8b6f8377634fd8dbb46bb6cf5fd85e6
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/plugin/assembler/sleigh/__init__.pyi
|
06aa25039d317ada39fe4fc7a2ebed9a6d64d14f
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
pyi
|
from . import expr as expr
from . import grammars as grammars
from . import parse as parse
from . import sem as sem
from . import symbol as symbol
from . import tree as tree
from . import util as util
from .SleighAssembler import SleighAssembler as SleighAssembler
from .SleighAssemblerBuilder import SleighAssemblerBuilder as SleighAssemblerBuilder
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
911de7881b47cc1852f47d65dbed33605ea1dae8
|
07504838d12c6328da093dce3726e8ed096cecdb
|
/pylon/resources/properties/maxRcvTime.py
|
2953f5d58661222f607dd076848ff88ea03fa2ea
|
[] |
no_license
|
lcoppa/fiat-lux
|
9caaa7f3105e692a149fdd384ec590676f06bf00
|
7c166bcc08768da67c241078b397570de159e240
|
refs/heads/master
| 2020-04-04T02:47:19.917668
| 2013-10-10T10:22:51
| 2013-10-10T10:22:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,387
|
py
|
"""maxRcvTime standard property type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.datapoints.time_sec
from pylon.resources.standard import standard
class maxRcvTime(pylon.resources.datapoints.time_sec.time_sec):
    """maxRcvTime standard property type. Maximum receive time. The maximum
    period of time that may expire with no updates on the associated input
    network variables before the object goes into heartbeat failure mode. A
    zero value disables."""
    def __init__(self):
        super().__init__(
        )
        # Two-byte zero default; per the docstring, a zero value disables
        # the receive timeout.
        self._default_bytes = b'\x00\x00'
        self._original_name = 'SCPTmaxRcvTime'
        # Scope 0 (standard), property key 48.
        self._property_scope, self._property_key = 0, 48
        # Register this property type in the standard resource set.
        self._definition = standard.add(self)


if __name__ == '__main__':
    # unit test code.
    item = maxRcvTime()
    pass
|
[
"lcoppa@rocketmail.com"
] |
lcoppa@rocketmail.com
|
ecc1d7540dc02c0544f633704a1a9bc3a5f5e92d
|
b66ec155356c11cabe2350a583f03dd6fad7f105
|
/scripts/generate_sample_file.py
|
2f4e67bb29a8e0027ed2c6d731c82d62d6540d7f
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ONSdigital/response-operations-ui
|
823344b88d71ddadb36ccc1e7ca2fbd556456e92
|
c0e37ac87ca8ba8ae0433d0222b3d7b4ff1c2cbd
|
refs/heads/main
| 2023-08-18T08:18:44.118075
| 2023-08-02T10:43:41
| 2023-08-02T10:43:41
| 112,603,785
| 4
| 2
|
MIT
| 2023-09-14T10:35:46
| 2017-11-30T11:31:09
|
Python
|
UTF-8
|
Python
| false
| false
| 287
|
py
|
# Append five sample respondent rows (rurefs 49930000000..49930000004) to
# sample.csv.
# NOTE(review): append mode means repeated runs keep growing the file;
# kept as-is to preserve existing behaviour.
with open("sample.csv", "a") as f:
    # `with` guarantees the handle is closed even if a write fails; the old
    # open()/close() pair leaked the handle on error.
    for i in range(5):
        f.write(
            f"499300{i:05}:F:50300:50300:45320:45320:8478:801325:9900000576:1:E:FE:01/09/1993:ENTNAME1_COMPANY1:"
            f"ENTNAME2_COMPANY1::RUNAME1_COMPANY1:RUNNAME2_COMPANY1::TOTAL UK ACTIVITY:::C:D:7:0001:S\n"
        )
|
[
"noreply@github.com"
] |
ONSdigital.noreply@github.com
|
5acce732d50f502803e97c8c160c43ebfa86612f
|
bfbe642d689b5595fc7a8e8ae97462c863ba267a
|
/bin/Python27/Lib/site-packages/pylint/test/input/func_format.py
|
a4eed23cce1b18826301e55835ef0c93e6815fcb
|
[
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
mcanthony/meta-core
|
0c0a8cde1669f749a4880aca6f816d28742a9c68
|
3844cce391c1e6be053572810bad2b8405a9839b
|
refs/heads/master
| 2020-12-26T03:11:11.338182
| 2015-11-04T22:58:13
| 2015-11-04T22:58:13
| 45,806,011
| 1
| 0
| null | 2015-11-09T00:34:22
| 2015-11-09T00:34:22
| null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
# pylint:disable=C0103,W0104,W0105
"""Check format (pylint test input; the bad spacing below is intentional)
"""
__revision__ = ''
notpreceded= 1  # deliberate bad spacing
notfollowed =1
notfollowed <=1
correct = 1
correct >= 1
def func(arg, arg2):
    """test named argument
    """
    func(arg=arg+1,
         arg2=arg2-arg)
aaaa,bbbb = 1,2
aaaa |= bbbb
aaaa &= bbbb
if aaaa: pass
else:
    aaaa,bbbb = 1,2
    aaaa,bbbb = bbbb,aaaa
bbbb = (1,2,3)
aaaa = bbbb[1:]
aaaa = bbbb[:1]
aaaa = bbbb[:]
aaaa = {aaaa:bbbb}
# allclose(x,y) uses |x-y|<ATOL+RTOL*|y|
"""docstring,should not match
isn't it:yes!
a=b
"""
aaaa = 'multiple lines\
string,hehehe'
boo = 2 # allclose(x,y) uses |x-y|<ATOL+RTOL*|y|
def other(funky):
    """yo, test formatted result with indentation"""
    funky= funky+2
html = """<option value="=">ist genau gleich</option>
yo+=4
"""
html2 = """<option value='='>ist genau gleich</option>
yo+=4
"""
func('''<body>Hello
</body>''', 0)
assert boo <= 10, "Note is %.2f. Either you cheated, or pylint's \
broken!" % boo
def _gc_debug(gcc):
    """bad format undetected w/ py2.5"""
    ocount = {}
    for obj in gcc.get_objects():
        try:
            ocount[obj.__class__]+= 1
        except KeyError:
            ocount[obj.__class__]=1
        except AttributeError:
            pass
def hop(context):
    """multi-lines string"""
    return ['''<a id="sendbutton" href="javascript: $('%(domid)s').submit()">
    <img src="%(sendimgpath)s" alt="%(send)s"/>%(send)s</a>''' % context,
            '''<a id="cancelbutton" href="javascript: history.back()">
    <img src="%(cancelimgpath)s" alt="%(cancel)s"/>%(cancel)s</a>''' % context,
            ]
titreprojet = '<tr><td colspan="10">\
<img src="images/drapeau_vert.png" alt="Drapeau vert" />\
<strong>%s</strong></td></tr>' % aaaa
|
[
"kevin.m.smyth@gmail.com"
] |
kevin.m.smyth@gmail.com
|
39c38cd5e8854236c9e077276699086fb28a72dc
|
7bc54bae28eec4b735c05ac7bc40b1a8711bb381
|
/src/dataset_specific/mnli/parsing_jobs/read_data_fn.py
|
da7fb65fc85bf09e27a79a9a17f912738e950ea6
|
[] |
no_license
|
clover3/Chair
|
755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e
|
a2102ebf826a58efbc479181f1ebb5de21d1e49f
|
refs/heads/master
| 2023-07-20T17:29:42.414170
| 2023-07-18T21:12:46
| 2023-07-18T21:12:46
| 157,024,916
| 0
| 0
| null | 2023-02-16T05:20:37
| 2018-11-10T21:55:29
|
Python
|
UTF-8
|
Python
| false
| false
| 314
|
py
|
from typing import Iterator
from dataset_specific.mnli.parsing_jobs.partition_specs import get_mnli_spacy_ps
from dataset_specific.mnli.parsing_jobs.run_spacy import NLIPairDataSpacy
def read_spacy_nli(split) -> Iterator[NLIPairDataSpacy]:
    """Stream the spaCy-parsed MNLI pairs for `split` from its pickles."""
    return get_mnli_spacy_ps(split).read_pickles_as_itr()
|
[
"lesterny@gmail.com"
] |
lesterny@gmail.com
|
cdd1cda3afaf7c3e8dae492fadf1f64db86326fb
|
651d77155bcb104d76fba163060b1d62fc4923f9
|
/ScholarConfig/__init__.py
|
aa3569dfd7e31e192d3a6856e79b878aa1f0cca6
|
[] |
no_license
|
leisun123/scholar-private
|
908f0ae5c95b6644bb157587007dfbb5e42d5928
|
81b93f6f16d2233419894cf45bdf34883f8b0cd2
|
refs/heads/master
| 2020-03-22T18:31:49.072513
| 2019-02-18T07:50:34
| 2019-02-18T07:50:34
| 140,465,515
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
#coding:utf-8
"""
@file: __init__.py
@author: IsolationWyn
@contact: genius_wz@aliyun.com
@python: 3.5.2
@editor: PyCharm
@create: 2017/7/1 5:40
@description:
--
"""
|
[
"genius_wz@aliyun.com"
] |
genius_wz@aliyun.com
|
038d3954ec78ca6127f92666a6ba6b7a18a07af7
|
527e3ca2c316f7713ecf92faac58cd33cfaacb81
|
/data_structures/sqlite_dict.py
|
9c4f405aeb82b931153db26dce22023b357f4f91
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
aaw/yaupon
|
1bade3ecd61f11468cb7ff7cd131ecdb1031aee7
|
2608e780abec654ff2d02e76d9ddd528d7fa69fa
|
refs/heads/master
| 2021-01-10T21:27:21.727342
| 2011-04-24T15:42:54
| 2011-04-24T15:42:54
| 832,110
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,069
|
py
|
import sqlite3
import yaupon.backend
from yaupon.data_structures.sqlite_tools import to_db, from_db
class SQLiteDictIterator(object):
def __init__(self, cursor):
self.cursor = cursor
def __iter__(self):
return self
def next(self):
result = self.cursor.fetchone()
if result is None:
raise StopIteration
else:
return self.transform(result)
class SQLiteDictScalarIterator(SQLiteDictIterator):
    """Iterator yielding a single deserialized column per row."""

    def __init__(self, cursor):
        super(SQLiteDictScalarIterator, self).__init__(cursor)

    def transform(self, row):
        return from_db(row[0])
class SQLiteDictRowIterator(SQLiteDictIterator):
    """Iterator yielding each row as a tuple of deserialized columns."""

    def __init__(self, cursor):
        super(SQLiteDictRowIterator, self).__init__(cursor)

    def transform(self, row):
        return tuple(from_db(column) for column in row)
class SQLiteDict(object):
    """A persistent dict-like mapping backed by a SQLite table.

    Each instance owns one row in `dict_instances` and one table
    `dict_<id>` of (key, value) BLOB pairs; keys and values are
    serialized with to_db/from_db.  Passing an existing `id` re-attaches
    to a previously created table.  API follows the Python 2 dict
    protocol (has_key, iteritems, ...).
    """

    def __init__(self,
                 backend=None,
                 id=None,
                 pickle_protocol=2,
                 dict_args=None,
                 dict_kwargs=None):
        if backend is None:
            backend = yaupon.backend.BackendSQLite()
        self.backend = yaupon.backend.getbackend(backend)
        # NOTE(review): uses the raw `backend` argument's conn, not
        # self.backend — confirm getbackend returns the same object.
        self.conn = backend.conn
        self.pickle_protocol = pickle_protocol
        self.conn.execute("""CREATE TABLE IF NOT EXISTS dict_instances
                             (id INTEGER PRIMARY KEY AUTOINCREMENT)
                          """)
        if id is not None:
            self.id = id
        else:
            # Allocate a fresh instance id from the autoincrement column.
            self.id = self.conn.execute("""INSERT INTO dict_instances
                                           VALUES (NULL)""").lastrowid
        self.conn.execute("""CREATE TABLE IF NOT EXISTS dict_%s
                             (key BLOB UNIQUE,
                              value BLOB)
                          """ % self.id)
        self.conn.execute("""CREATE INDEX IF NOT EXISTS dict_%s_key_index
                             ON dict_%s(key)
                          """ % (self.id, self.id))
        self.conn.commit()
        # Pre-built SQL; the table name embeds the integer instance id
        # (not user input), while keys/values use placeholders.
        self.__get_STMT = 'SELECT value FROM dict_%s WHERE key = ?' % \
                          self.id
        self.__set_STMT = """REPLACE INTO dict_%s (key, value)
                             VALUES (?,?)""" % self.id
        self.__delete_STMT = 'DELETE FROM dict_%s WHERE key = ?' % self.id
        if dict_args is None:
            dict_args = []
        if dict_kwargs is None:
            dict_kwargs = {}
        # Seed the mapping with any initial dict(*args, **kwargs) contents.
        initial_dict = dict(*dict_args, **dict_kwargs)
        self.update(initial_dict)

    def __getstate__(self):
        # Pickle support: the live connection/backend are not picklable,
        # so store the backend id instead.
        state = self.__dict__.copy()
        del state['backend']
        del state['conn']
        state['backend_id'] = self.backend.id
        return state

    def __setstate__(self, state):
        # Re-attach to the (cached) backend identified in the state.
        backend_id = state.pop('backend_id')
        self.__dict__.update(state)
        self.backend = yaupon.backend.get_cached_sqlite_backend(backend_id)
        self.conn = self.backend.conn

    def __backend__(self):
        return self.backend

    def __get_helper(self, key):
        # Return the raw (value,) row for key, or None when absent.
        cursor = self.conn.execute(self.__get_STMT, (to_db(key),))
        return cursor.fetchone()

    def __getitem__(self, key):
        result = self.__get_helper(key)
        if result is None:
            raise KeyError(key)
        return from_db(result[0])

    def __setitem__(self, key, value):
        # REPLACE handles both insert and overwrite (key column is UNIQUE).
        self.conn.execute(self.__set_STMT, (to_db(key), to_db(value)))
        self.conn.commit()

    def __delitem__(self, key):
        # Probe first so a missing key raises KeyError like a real dict.
        result = self.__get_helper(key)
        if result is None:
            raise KeyError(key)
        self.conn.execute(self.__delete_STMT, (to_db(key),))
        self.conn.commit()

    def has_key(self, key):
        return self.__get_helper(key) is not None

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, defaultval=None):
        result = self.__get_helper(key)
        if result is None:
            return defaultval
        return from_db(result[0])

    def clear(self):
        self.conn.execute('DELETE FROM dict_%s' % self.id)
        self.conn.commit()

    def update(self, d):
        # NOTE: requires d to provide iteritems() (Python 2 dict API).
        for k,v in d.iteritems():
            self.__setitem__(k,v)

    def setdefault(self, key, value=None):
        real_value = self.__get_helper(key)
        if real_value is None:
            self.__setitem__(key, value)
            return value
        else:
            return from_db(real_value[0])

    def iteritems(self):
        cursor = self.conn.execute("""SELECT key,value
                                      FROM dict_%s""" % self.id)
        return SQLiteDictRowIterator(cursor)

    def iterkeys(self):
        cursor = self.conn.execute('SELECT key FROM dict_%s' % self.id)
        return SQLiteDictScalarIterator(cursor)

    def itervalues(self):
        cursor = self.conn.execute('SELECT value FROM dict_%s' % self.id)
        return SQLiteDictScalarIterator(cursor)

    def __len__(self):
        cursor = self.conn.execute('SELECT COUNT(*) FROM dict_%s' % self.id)
        return cursor.fetchone()[0]
|
[
"aaron.windsor@gmail.com"
] |
aaron.windsor@gmail.com
|
9efb152f8faa20bf3b3821830425fbcf69e62ea4
|
1625edfe28b4b0979fd32b4a3c5e55249a993fd5
|
/baekjoon11048.py
|
172ca066d44cfb4bc64b7723fa299d1e33e4c9e9
|
[] |
no_license
|
beOk91/baekjoon2
|
b8bf504c506c6278899d4107ecfe51974ef13f5e
|
39569f8effb8e32405a7d74d98bdabcab783ec56
|
refs/heads/master
| 2023-05-11T20:11:19.015113
| 2020-09-14T23:58:49
| 2020-09-14T23:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
n,m=map(int,input().strip().split())
candy=[[0]*m for _ in range(n)]
dp=[[0]*m for _ in range(n)]
for i in range(n):
candy[i]=list(map(int,input().strip().split()))
for i in range(n):
for j in range(m):
dp[i][j]+=candy[i][j]
if i>=1 and j>=1:
dp[i][j]=candy[i][j]+max(dp[i-1][j],dp[i][j-1],dp[i-1][j-1])
elif j==0 and i>=1:
dp[i][j]+=dp[i-1][j]
elif i==0 and j>=1:
dp[i][j]+=dp[i][j-1]
print(dp[n-1][m-1])
|
[
"be_ok91@naver.com"
] |
be_ok91@naver.com
|
23cbad3a18804ebdd91da88659a373dc225c8d96
|
dd573ed68682fd07da08143dd09f6d2324f51345
|
/baekjoon/my_study/1022_다른풀이2.py
|
7bf4d84e6ea0cc27ffb55dfd6dcd11b4b3472466
|
[] |
no_license
|
chelseashin/My-Algorithm
|
0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61
|
db692e158ebed2d607855c8e554fd291c18acb42
|
refs/heads/master
| 2021-08-06T12:05:23.155679
| 2021-07-04T05:07:43
| 2021-07-04T05:07:43
| 204,362,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
import sys
sys.stdin = open('1022_input.txt')
input = sys.stdin.readline
# 우 상 좌 하
d = [(1, 0), (0, -1), (-1, 0), (0, 1)]
r1, c1, r2, c2 = map(int, input().split())
arr = [[0] * (c2-c1+1) for _ in range(r2-r1+1)]
max_level = max(abs(r1), abs(c1), abs(r2), abs(c2))
r, c = 0, 0
cnt = 1
max_cnt = (max_level*2 + 1) ** 2
dist = 0
max_value = 0
i = 0
while cnt <= max_cnt:
if d[i][0]: dist += 1
for _ in range(dist):
if r1 <= r <= r2 and c1 <= c <= c2:
arr[r-r1][c-c1] = cnt
max_value = cnt
cnt += 1
c += d[i][0]
r += d[i][1]
i = (i + 1) % 4
max_length = len(str(max_value))
for i in arr:
for j in i:
print(str(j).rjust(max_length), end=" ")
print()
|
[
"chaewonshin95@gmail.com"
] |
chaewonshin95@gmail.com
|
ba8981626612bcea6b8a6291d19cb5fff90415c4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02714/s449498366.py
|
6b369949d8b8c4f43a2335477399ed7b459eacfd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
from collections import *
N=int(input())
S=input()
c=Counter(S)
ans=c['R']*c['G']*c['B']
for i in range(1,N//2+1):
for j in range(i,N-i):
if S[j-i]!=S[j] and S[j]!=S[j+i] and S[j+i]!=S[j-i]:
ans-=1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4f9889c051ab3c9bec42a8c062228eb2f4078940
|
ad963dc590fe3ee16fe70674ffa9a77a3462a2d2
|
/taskManager/migrations/0021_auto_20200819_0632.py
|
2efe17d7eef88bf142301e0361778a5ec6d7ef21
|
[] |
no_license
|
ShuheiKuriki/task_manager
|
564dc1a646efdd288ff31bc9044981aecbd6db78
|
f5d4a53a758c64615f22c69baae59b36dd5dab1f
|
refs/heads/master
| 2023-05-12T11:06:11.388036
| 2023-01-15T09:12:37
| 2023-01-15T09:12:37
| 234,110,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
# Generated by Django 3.0.2 on 2020-08-18 21:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('taskManager', '0020_auto_20200425_1624'),
]
operations = [
migrations.AlterField(
model_name='task',
name='deadline',
field=models.DateField(default=django.utils.timezone.now, verbose_name='期限'),
),
migrations.AlterField(
model_name='task',
name='done_date',
field=models.DateField(default=django.utils.timezone.now, verbose_name='完了した日'),
),
migrations.AlterField(
model_name='task',
name='when',
field=models.DateField(default=django.utils.timezone.now, verbose_name='実行予定日'),
),
]
|
[
"shukuri.7336.8@gmail.com"
] |
shukuri.7336.8@gmail.com
|
543728b72f006a8af964401c5f1de19a34155aaa
|
1775a5522f465cb74a1e02393d32c363bb7ef215
|
/tests/i18n/contenttypes/tests.py
|
894ae0a3c737dff6674660450c1c9bdfd1e3d4c2
|
[
"BSD-3-Clause"
] |
permissive
|
trught007/django
|
b280eaff7706e72a6fc0f298c68e3c065daa448b
|
d55d21dbb8b307941c2d26b95be46bf83015d868
|
refs/heads/master
| 2022-12-21T04:23:49.786811
| 2020-10-01T08:24:33
| 2020-10-01T08:24:33
| 300,203,187
| 0
| 0
|
NOASSERTION
| 2020-10-01T08:23:34
| 2020-10-01T08:23:33
| null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.test.utils import TransRealMixin
from django.utils._os import upath
from django.utils import six
from django.utils import translation
@override_settings(
USE_I18N=True,
LOCALE_PATHS=(
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
),
LANGUAGE_CODE='en',
LANGUAGES=(
('en', 'English'),
('fr', 'French'),
),
)
class ContentTypeTests(TransRealMixin, TestCase):
def test_verbose_name(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
with translation.override('en'):
self.assertEqual(six.text_type(company_type), 'Company')
with translation.override('fr'):
self.assertEqual(six.text_type(company_type), 'Société')
def test_field_override(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
company_type.name = 'Other'
self.assertEqual(six.text_type(company_type), 'Other')
|
[
"ekurgn@gmail.com"
] |
ekurgn@gmail.com
|
92491a80807b1a8457d04db0b1caa1a5bc99bfc8
|
ff738b3ec7e5c8c414f6d3c7d74310d8fab69368
|
/Top Interview Questions/236. Lowest Common Ancestor of a Binary Tree/solution.py
|
a8394b1efd2afa040587dddb0b3ee1a3f7fe33e2
|
[] |
no_license
|
jw3329/leetcode-problem-solving
|
a0684ef13bd60e81bd54b91e1b54827aaac9bf16
|
0cc7ad64891a23e348c8214f806a2820ac8c9e0a
|
refs/heads/main
| 2023-08-17T20:36:51.624415
| 2023-08-17T07:09:56
| 2023-08-17T07:09:56
| 170,944,191
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if not root or root == p or root == q: return root
left = self.lowestCommonAncestor(root.left,p,q)
right = self.lowestCommonAncestor(root.right,p,q)
if not left:
return right
else:
if not right:
return left
return root
|
[
"junwon3329@gmail.com"
] |
junwon3329@gmail.com
|
7bd223eed55c60a6bbc672d3eead4cec2eaa85da
|
9e87897c988af634c3fddc42113992a65ec006f4
|
/sims/publish/Timer.py
|
55983e3ee337f2acbf16df75fcc7534d8c10ee4c
|
[
"MIT"
] |
permissive
|
luiarthur/cytof5
|
152eb06030785fdff90220f0d0a244a02204c2e9
|
6b4df5e9fd94bfd586e96579b8c618fdf6f913ed
|
refs/heads/master
| 2021-07-20T13:39:45.821597
| 2021-03-02T23:27:35
| 2021-03-02T23:27:35
| 145,253,611
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
import time
class Timer(object):
"""
Usage:
with Timer('Model training'):
time.sleep(2)
x = 1
"""
def __init__(self, name=None, ndigits=3):
self.name = name
self.ndigits = ndigits
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print(self.name, end=' ')
elapsed = time.time() - self.tstart
print('time: {}s'.format(round(elapsed, self.ndigits)))
|
[
"luiarthur@gmail.com"
] |
luiarthur@gmail.com
|
c235e510251267965a2a2495d688e9f3a662d539
|
6d143b66abdc9d871468e0576de09c63ca71a630
|
/A98_Guess_Bin_Dec/QA.py
|
69aa9552f8b49c96a9173f8178b01b531fccdb8b
|
[] |
no_license
|
ChristerNilsson/KosmosTeacher
|
98acf9bf35ed13930a05641652e68c7e0f3d2d7c
|
f47d2e8dba21f2ea6f19d6c7b766f36ccbcbeda4
|
refs/heads/master
| 2021-03-19T13:45:51.307065
| 2016-10-13T01:25:34
| 2016-10-13T01:25:34
| 68,541,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
import random
class QA:
def __init__(self,width,height):
self.level = 1
self.width = width
self.height = height
self.longList = self.makeLongList()
self.shortList = self.makeShortList()
def makeShortList(self):
self.n = self.level+1
self.index = random.randint(0, self.n-1)
self.guess_index = -1
return random.sample(self.longList, self.n)
def mousePressed(self):
if mouseX < width/2: return
self.guess_index = mouseY/(height/self.n)
self.level += 1 if self.guess_index == self.index else -1
self.level = constrain(self.level, 1, 10)
self.longList = self.makeLongList()
self.shortList = self.makeShortList()
def draw(self):
if self.index >= len(self.shortList): return
self.displayQuestion(self.shortList[self.index], 0, 0, width/2, height)
h = height/self.n
for i in range(len(self.shortList)):
self.displayAnswer(self.shortList[i], width/2, i*h, width/2, h)
|
[
"janchrister.nilsson@gmail.com"
] |
janchrister.nilsson@gmail.com
|
7ee95bf97026d05a27e1e151a7c260a48f2df776
|
e5b8a5d93989dd53933c5cd417afa8b2a39ad307
|
/ultracart/models/cart_settings_payment_credit_card.py
|
5035a14959094b67696de38b8d2b473edf852597
|
[
"Apache-2.0"
] |
permissive
|
gstingy/uc_python_api
|
f3586bfce9c962af2e8c1bc266ff25e0f1971278
|
9a0bd3f6e63f616586681518e44fe37c6bae2bba
|
refs/heads/master
| 2020-03-28T11:13:22.537641
| 2018-09-10T17:07:59
| 2018-09-10T17:07:59
| 148,190,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,222
|
py
|
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CartSettingsPaymentCreditCard(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'collect_credit_card_verification_number': 'bool',
'credit_card_types': 'list[str]',
'hosted_fields_shopping_cart_token': 'str'
}
attribute_map = {
'collect_credit_card_verification_number': 'collect_credit_card_verification_number',
'credit_card_types': 'credit_card_types',
'hosted_fields_shopping_cart_token': 'hosted_fields_shopping_cart_token'
}
def __init__(self, collect_credit_card_verification_number=None, credit_card_types=None, hosted_fields_shopping_cart_token=None):
"""
CartSettingsPaymentCreditCard - a model defined in Swagger
"""
self._collect_credit_card_verification_number = None
self._credit_card_types = None
self._hosted_fields_shopping_cart_token = None
self.discriminator = None
if collect_credit_card_verification_number is not None:
self.collect_credit_card_verification_number = collect_credit_card_verification_number
if credit_card_types is not None:
self.credit_card_types = credit_card_types
if hosted_fields_shopping_cart_token is not None:
self.hosted_fields_shopping_cart_token = hosted_fields_shopping_cart_token
@property
def collect_credit_card_verification_number(self):
"""
Gets the collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
True if the credit card verification number should be collected
:return: The collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
:rtype: bool
"""
return self._collect_credit_card_verification_number
@collect_credit_card_verification_number.setter
def collect_credit_card_verification_number(self, collect_credit_card_verification_number):
"""
Sets the collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
True if the credit card verification number should be collected
:param collect_credit_card_verification_number: The collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
:type: bool
"""
self._collect_credit_card_verification_number = collect_credit_card_verification_number
@property
def credit_card_types(self):
"""
Gets the credit_card_types of this CartSettingsPaymentCreditCard.
Available credit card types
:return: The credit_card_types of this CartSettingsPaymentCreditCard.
:rtype: list[str]
"""
return self._credit_card_types
@credit_card_types.setter
def credit_card_types(self, credit_card_types):
"""
Sets the credit_card_types of this CartSettingsPaymentCreditCard.
Available credit card types
:param credit_card_types: The credit_card_types of this CartSettingsPaymentCreditCard.
:type: list[str]
"""
self._credit_card_types = credit_card_types
@property
def hosted_fields_shopping_cart_token(self):
"""
Gets the hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
The shoppingCartToken needed for proper initialization of hosted fields collection
:return: The hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
:rtype: str
"""
return self._hosted_fields_shopping_cart_token
@hosted_fields_shopping_cart_token.setter
def hosted_fields_shopping_cart_token(self, hosted_fields_shopping_cart_token):
"""
Sets the hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
The shoppingCartToken needed for proper initialization of hosted fields collection
:param hosted_fields_shopping_cart_token: The hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
:type: str
"""
self._hosted_fields_shopping_cart_token = hosted_fields_shopping_cart_token
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CartSettingsPaymentCreditCard):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"perry@ultracart.com"
] |
perry@ultracart.com
|
e5605dc11600c8d220f96a2cd4525bcd1afc3fbe
|
e164fd9dce5fef093f85ca009f78570ec2b1c492
|
/134. Gas Station.py
|
460d1262ff314afb450941395616bf4f0bf76202
|
[] |
no_license
|
havenshi/leetcode
|
58fde93a1f1cbdd3c2faa9566c00383e5812f3a7
|
bcb79f329bcb133e6421db8fc1f4780a4eedec39
|
refs/heads/master
| 2021-01-22T04:15:23.748793
| 2019-11-30T04:25:54
| 2019-11-30T04:25:54
| 92,447,327
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
class Solution(object):
def canCompleteCircuit(self, gas, cost):
"""
:type gas: List[int]
:type cost: List[int]
:rtype: int
"""
sum = 0 # total remained gas
subsum = 0 # remained gas for each period
index = 0
for i in range(len(gas)):
if subsum + gas[i] - cost[i] >= 0: # can come to next station
subsum += gas[i] - cost[i]
sum += gas[i] - cost[i]
else:
subsum = 0
index = i + 1 # recount from next station
sum += gas[i] - cost[i] # still the total
if sum<0: # total gas can not cover a circle
return -1
else:
return index
|
[
"haiwen.shi01@gmail.com"
] |
haiwen.shi01@gmail.com
|
2d61a2d61a5c1cfb5a5512320d6a0d0b8f917e40
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/pointcloud/_stream.py
|
e428b82d9afa0bbe1a71fbaabb76554bb63cde83
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='stream', parent_name='pointcloud', **kwargs
):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Stream',
data_docs="""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to *50*, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.""",
**kwargs
)
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
2435df37568acac6639a123d883dec6a89e6d42a
|
a03eba726a432d8ef133f2dc55894ba85cdc4a08
|
/events/contrib/plugins/form_elements/fields/email/__init__.py
|
dcfddf62d769375628ada36fea952633354c581d
|
[
"MIT"
] |
permissive
|
mansonul/events
|
2546c9cfe076eb59fbfdb7b4ec8bcd708817d59b
|
4f6ca37bc600dcba3f74400d299826882d53b7d2
|
refs/heads/master
| 2021-01-15T08:53:22.442929
| 2018-01-30T16:14:20
| 2018-01-30T16:14:20
| 99,572,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
__title__ = 'fobi.contrib.plugins.form_elements.fields.email'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('default_app_config', 'UID',)
default_app_config = 'fobi.contrib.plugins.form_elements.fields.' \
'email.apps.Config'
UID = 'email'
|
[
"contact@dragosnicu.com"
] |
contact@dragosnicu.com
|
46da629c2f47e5979d62a6729f7c6c481856589d
|
f323771a686df2b934597f40334f168fa5d8915e
|
/interface_manager/backend/impi_project/interface_app/views/task/task_detail_results_views.py
|
e538efda5a6adfbdc8bc2239e5165fc01ec335b2
|
[] |
no_license
|
luochun3731/test-dev
|
4ac560b5cc2935986be7f3d29f445f6c0d59f60f
|
bab018464d26de5a493e5e5a60b382e612b6ca92
|
refs/heads/master
| 2022-02-04T13:20:23.748457
| 2019-05-19T03:32:41
| 2019-05-19T03:32:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,469
|
py
|
import datetime
from django.forms.models import model_to_dict
from interface_app import common
from interface_app.models.task import Task, TaskInterface
from interface_app.models.result import TaskResult, InterfaceResult
from django.views.generic import View
from interface_app.my_exception import MyException
# Create your views here.
# task的接口的增删改查
class TaskDetailVersionViews(View):
def get(self, request, pk, *args, **kwargs):
"""
获取单个任务的版本列表
:param request:
:param pk: # 任务的id
:param args:
:param kwargs:
:return:
"""
results = TaskResult.objects.filter(task_id=pk).order_by('-version')
ret = []
for i in results:
tmp = dict()
tmp["version"] = i.version
tmp["task_id"] = i.task_id
tmp['created'] = i.created.strftime("%Y-%m-%d %H:%M")
tmp["id"] = i.id
ret.append(tmp)
return common.response_success(ret)
class TaskDetailVersionResultsViews(View):
def get(self, request, pk, *args, **kwargs):
"""
获取一个版本的结果列表
:param request:
:param pk: # 任务的id
:param args:
:param kwargs:
:return:
"""
results = InterfaceResult.objects.filter(task_result_id=pk)
ret = [model_to_dict(i) for i in results]
return common.response_success(ret)
|
[
"harter@126.com"
] |
harter@126.com
|
00c8fe4031a06644da8c84e78b0266f96322e245
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_domains.py
|
e47a8adb75f53f5e40efaa4d4d78e4e9ac48034d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from xai.brain.wordbase.nouns._domain import _DOMAIN
#calss header
class _DOMAINS(_DOMAIN, ):
def __init__(self,):
_DOMAIN.__init__(self)
self.name = "DOMAINS"
self.specie = 'nouns'
self.basic = "domain"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
03ea1db815ea69be1500fd4fa0602c4e3cb62694
|
25439cf61037818a6c78a4d6db7edfddd7dc9b51
|
/src/python/zquantum/core/history/save_conditions.py
|
9a731f048d410c40324188401e6837557d0fd8f2
|
[
"Apache-2.0"
] |
permissive
|
antalszava/z-quantum-core
|
fa1f6b79709256e690194de306ac84df409f6950
|
8137ecb5833ab7996815c24eefa685317b5e2e13
|
refs/heads/master
| 2023-01-08T08:45:20.277025
| 2020-11-02T15:23:24
| 2020-11-02T15:23:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
"""Save conditions possible to use with recorder."""
from typing import Any
from typing_extensions import Protocol
class SaveCondition(Protocol):
"""Protocol of a function determining if given call should should be saved in the history."""
def __call__(self, value: Any, params: Any, call_number: int) -> bool:
"""Determine whether current call should be saved in the history.
Suppose the recorder is constructed for a function `f`, and the params
`x` are passed to `f` such that `y`=`f(x)`. Then, if this is `n-th`
evaluation of the function, the value of __call__(y, x, n) determines
if current call should be saved to the history.
:param value: current value of the function.
:param params: parameters passed to the function.
:param call_number: a natural number determining how many times the target
function has been called.
:return: A boolean indicating whether the call being processed should be saved to
history.
"""
pass
def always(value: Any, params: Any, call_number: int) -> bool:
"""Default save condition: save always."""
return True
def every_nth(n: int) -> SaveCondition:
"""Save condition: every n-th step, counting from zero-th one.
Note: this is factory function, i.e. it returns the actual save condition for given n.
"""
def _save_condition(value: Any, params: Any, call_number: int) -> bool:
return call_number % n == 0
return _save_condition
|
[
"dexter2206@gmail.com"
] |
dexter2206@gmail.com
|
ed1dae2731b95b2fb16c3f98fb739df9a348662a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_360/ch149_2020_04_13_19_40_50_377558.py
|
5a4ab204d93e91a583e1bd2feb89d2c0154352fc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
#inputs
salario = float(input("Qual o valor do seu salário?"))
dependentes = int(input("Qual o número de dependentes que você possui?"))
#cpi -> contribuição para o INSS
if salario<=1045.00:
cpi = salario*0.075
elif 1045.01<=salario<=2089.60:
cpi = salario*0.09
elif 2089.61<=salario<=3134.40:
cpi = salario*0.12
elif 3134.41<=salario<=6101.06:
cpi = salario*0.15
else:
cpi = 671.12
#bdc -> Base de cálculo
bdc = salario - cpi - (dependentes*189.59)
#alq -> Alíquota / ddc -> Dedução
if bdc<=1903.98:
alq = 0.0
ddc = 0.0
elif 1903.99<=bdc<=2826.65:
alq = 0.075
ddc = 142.8
elif 2826.66<=bdc<=3751.05:
alq = 0.15
ddc = 354.8
elif 3751.06<=bdc<=4664.68:
alq = 0.225
ddc = 636.13
else:
alq = 0.275
ddc = 869.36
#irf -> IRRF
irf = (bdc*alq) - ddc
print("O IRRF simplificado é: {0}".format(irf))
|
[
"you@example.com"
] |
you@example.com
|
a7c35533e3ca5b39277826fd3ced2b3e6d144ed4
|
8a25ada37271acd5ea96d4a4e4e57f81bec221ac
|
/home/pi/GrovePi/Software/Python/others/temboo/Library/CorpWatch/Search/__init__.py
|
d4a4dbc01fbbb5c4e9e067ff9c8408ca1f330110
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
lupyuen/RaspberryPiImage
|
65cebead6a480c772ed7f0c4d0d4e08572860f08
|
664e8a74b4628d710feab5582ef59b344b9ffddd
|
refs/heads/master
| 2021-01-20T02:12:27.897902
| 2016-11-17T17:32:30
| 2016-11-17T17:32:30
| 42,438,362
| 7
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from temboo.Library.CorpWatch.Search.CompoundSearch import CompoundSearch, CompoundSearchInputSet, CompoundSearchResultSet, CompoundSearchChoreographyExecution
|
[
"lupyuen@gmail.com"
] |
lupyuen@gmail.com
|
faac4a6cd4ec646fc0024bc28f81558a4cd81c2c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_246/ch43_2020_03_30_19_26_24_928655.py
|
f0b9458eee10919d0689f4465806a7c9930a7dd9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
i=int(input('Qual é o número do mês:'))
meses=[Janeiro, Fevereiro, Março, Abril, Maio, Junho, Julho, Agosto, Setembro, Outubro, Novembro, Dezembro]
print (meses[i-1])
|
[
"you@example.com"
] |
you@example.com
|
6170faf7a4373ceea1b8454264dad210b318c3be
|
0b25dc3f9b4ef736e739aadddec33b96dd65a0c8
|
/面向对象/hello1.py
|
777efad3932ed74d0f7ab912c6e50d9975306ee6
|
[] |
no_license
|
ttp55/LearnPy
|
b123f44a74e4364771491c572705742c15eb33ff
|
1530e158bde152e5c585f496dd1e5ffcffdb73bc
|
refs/heads/master
| 2023-05-11T13:02:30.157285
| 2023-05-08T07:13:57
| 2023-05-08T07:13:57
| 196,953,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'WZG'
from Hello import Hello
h = Hello()
print(type(Hello), type(h), h.hello())
|
[
"1047697347@qq.com"
] |
1047697347@qq.com
|
cf8f8d73405b71160adac91f36ff89bc04192843
|
d98e4ce0a2d5064c38e7445b47d094661cdc2d54
|
/lib/bango/constants.py
|
c7e17a6117303d94734a55dfb791d0c15a0f7293
|
[] |
no_license
|
wraithan/solitude
|
549f007b4ac294439788f903110719bb6b404844
|
c46508929f510513a9ffc91017a1dc7983630ea3
|
refs/heads/master
| 2021-01-16T19:51:34.925247
| 2013-01-11T17:29:22
| 2013-01-11T17:29:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
import re
ACCESS_DENIED = 'ACCESS_DENIED'
BANGO_ALREADY_PREMIUM_ENABLED = 'BANGO_ALREADY_PREMIUM_ENABLED'
BANK_DETAILS_EXIST = 'BANK_DETAILS_EXIST'
INTERNAL_ERROR = 'INTERNAL_ERROR'
# There is one of these for every field.
INVALID = re.compile('^INVALID_\w+$')
NO_BANGO_EXISTS = 'NO_BANGO_EXISTS'
OK = 'OK'
REQUIRED_CONFIGURATION_MISSING = 'REQUIRED_CONFIGURATION_MISSING'
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE'
HEADERS_SERVICE = 'x-solitude-service'
HEADERS_SERVICE_GET = 'HTTP_X_SOLITUDE_SERVICE'
CURRENCIES = {
'AUD': 'Australian Dollars',
'CAD': 'Canadian Dollars',
'CHF': 'Swiss Francs',
'DKK': 'Danish Krone',
'EUR': 'Euro',
'GBP': 'Pounds Sterling',
'MYR': 'Malaysian Ringgit',
'NOK': 'Norwegian Krone',
'NZD': 'New Zealand Dollars',
'SEK': 'Swedish Krone',
'SDG': 'Singapore Dollar',
'THB': 'Thai Baht',
'USD': 'US Dollars',
'ZAR': 'South African Rand',
}
# TODO: Expand this bug 814492.
CATEGORIES = {
1: 'Games'
}
# List of valid country codes: http://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
COUNTRIES = [
'BRA',
'ESP'
]
RATINGS = ['GLOBAL', 'UNIVERSAL', 'RESTRICTED']
RATINGS_SCHEME = ['GLOBAL', 'USA']
PAYMENT_TYPES = ('OPERATOR', 'PSMS', 'CARD', 'INTERNET')
def match(status, constant):
# There's going to be an INVALID_ something for every field in every form
# adding them all to this is boring. Let's make a regex to map them.
if isinstance(constant, basestring):
return status, constant
return constant.match(status)
|
[
"amckay@mozilla.com"
] |
amckay@mozilla.com
|
7f9bbff19c960a1447601077fbce7f295141b375
|
15977cb6d59853e3b1c6ce6ef5ed33993b838a93
|
/rmgpy/rmg/react.py
|
d68d358807f917833e00fb769972d8d90a1c38d9
|
[
"MIT"
] |
permissive
|
nateharms/RMG-Py
|
43cf7637005829a0ae743763a70bf27b6d29e5ac
|
80deaebddcbb14b7c41e232b67e1c973e0b18324
|
refs/heads/master
| 2021-01-18T18:29:49.279983
| 2016-08-23T14:05:24
| 2016-08-24T18:51:18
| 62,814,768
| 1
| 0
|
NOASSERTION
| 2020-06-02T21:33:44
| 2016-07-07T14:49:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,546
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Contains functions for generating reactions.
"""
import logging
import itertools
from rmgpy.molecule.molecule import Molecule
from rmgpy.data.rmg import getDB
from rmgpy.scoop_framework.util import map_
from rmgpy.species import Species
def react(*spcTuples):
"""
Generate reactions between the species in the
list of species tuples for all the reaction families available.
For each tuple of one or more Species objects [(spc1,), (spc2, spc3), ...]
the following is done:
A list of tuples is created for each resonance isomer of the species.
Each tuple consists of (Molecule, index) with the index the species index of the Species object.
Possible combinations between the first spc in the tuple, and the second species in the tuple
is obtained by taking the combinatorial product of the two generated [(Molecule, index)] lists.
Returns a flat generator object containing the generated Reaction objects.
"""
combos = []
for t in spcTuples:
t = tuple([spc.copy(deep=True) for spc in t])
if len(t) == 1:#unimolecular reaction
spc, = t
mols = [(mol, spc.index) for mol in spc.molecule]
combos.extend([(combo,) for combo in mols])
elif len(t) == 2:#bimolecular reaction
spcA, spcB = t
molsA = [(mol, spcA.index) for mol in spcA.molecule]
molsB = [(mol, spcB.index) for mol in spcB.molecule]
combos.extend(itertools.product(molsA, molsB))
results = map_(
reactMolecules,
combos
)
reactionList = itertools.chain.from_iterable(results)
return reactionList
def reactMolecules(moleculeTuples):
"""
Performs a reaction between
the resonance isomers.
The parameter contains a list of tuples with each tuple:
(Molecule, index of the core species it belongs to)
"""
families = getDB('kinetics').families
molecules, reactantIndices = zip(*moleculeTuples)
reactionList = []
for _, family in families.iteritems():
rxns = family.generateReactions(molecules)
reactionList.extend(rxns)
for reactant in molecules:
reactant.clearLabeledAtoms()
deflate(reactionList, molecules, reactantIndices)
return reactionList
def deflate(rxns, molecules, reactantIndices):
"""
The purpose of this function is to replace the reactants and
products of a reaction, stored as Molecule objects by
integer indices, corresponding to the species core index.
Creates a dictionary with Molecule objects as keys and newly
created Species objects as values.
It iterates over the reactantIndices array, with elements in this array
corresponding to the indices of the core species. It creates a
Molecule -> index entry in the previously created dictionary.
It iterates over the reaction list, and iteratively updates the
created dictionary as more reactions are processed.
"""
molDict = {}
for i, coreIndex in enumerate(reactantIndices):
if coreIndex != -1:
molDict[molecules[i]] = coreIndex
for rxn in rxns:
deflateReaction(rxn, molDict)
try:
deflateReaction(rxn.reverse, molDict)
except AttributeError, e:
pass
def reactAll(coreSpcList, numOldCoreSpecies, unimolecularReact, bimolecularReact):
"""
Reacts the core species list via uni- and bimolecular reactions.
"""
# Select reactive species that can undergo unimolecular reactions:
spcTuples = [(coreSpcList[i],)
for i in xrange(numOldCoreSpecies) if (unimolecularReact[i] and coreSpcList[i].reactive)]
for i in xrange(numOldCoreSpecies):
for j in xrange(i, numOldCoreSpecies):
# Find reactions involving the species that are bimolecular
# This includes a species reacting with itself (if its own concentration is high enough)
if bimolecularReact[i,j]:
if coreSpcList[i].reactive and coreSpcList[j].reactive:
spcTuples.append((coreSpcList[i], coreSpcList[j]))
rxns = list(react(*spcTuples))
return rxns
def deflateReaction(rxn, molDict):
"""
This function deflates a single reaction, and uses the provided
dictionary to populate reactants/products/pairs with integer indices,
if possible.
If the Molecule object could not be found in the dictionary, a new
dictionary entry is created, creating a new Species object as the value
for the entry.
The reactants/products/pairs of both the forward and reverse reaction
object are populated with the value of the dictionary, either an
integer index, or either a Species object.
"""
for mol in itertools.chain(rxn.reactants, rxn.products):
if not mol in molDict:
molDict[mol] = Species(molecule=[mol])
rxn.reactants = [molDict[mol] for mol in rxn.reactants]
rxn.products = [molDict[mol] for mol in rxn.products]
rxn.pairs = [(molDict[reactant], molDict[product]) for reactant, product in rxn.pairs]
|
[
"nickvandewiele@gmail.com"
] |
nickvandewiele@gmail.com
|
02e0da6b388d781ffe02391d8ca227943d545ccf
|
3efca607aefbd6cf558517bae689ccdacb7b383e
|
/test/functional/feature_dersig.py
|
ee176b713d41056f61d283b0c330bfe6a8bfe006
|
[
"MIT"
] |
permissive
|
MicroBitcoinOrg/MicroBitcoin
|
f761b2ff04bdcb650d7c0ddbef431ef95cd69541
|
0119e8eff44ec4d94313eaa30022a97692b71143
|
refs/heads/snapshot
| 2022-12-27T10:04:21.040945
| 2021-02-09T05:51:45
| 2021-02-09T05:51:45
| 132,959,214
| 21
| 33
|
MIT
| 2020-06-12T04:38:45
| 2018-05-10T22:07:51
|
C++
|
UTF-8
|
Python
| false
| false
| 6,145
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import msg_block
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, wait_until
DERSIG_HEIGHT = 1251
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1']]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False, 'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
self.nodes[0].testmempoolaccept(rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True)
)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
# We can receive different reject messages depending on whether
# microd is running with multiple script check threads. If script
# check threads are not in use, then transaction script validation
# happens sequentially, and microd produces more specific reject
# reasons.
assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
|
[
"iamstenman@protonmail.com"
] |
iamstenman@protonmail.com
|
52d73a48b5c2f302869cc8f0c3a901ca166808fa
|
bc437dc74647765b51996f64b35fda3d047daf93
|
/3_Intermediate+/day32_Automated_Birthday_Wisher/day32_start/day32-start.py
|
850642f1ced183de5325ea62b4df0dabbf9f2a78
|
[] |
no_license
|
macosta-42/100_days_of_code
|
e06720d57b6ed870a3dd4fa4e6d019296206a08f
|
5b527dc18bae2ef556c26f653ef3c4badf94bb82
|
refs/heads/main
| 2023-05-22T03:26:02.422275
| 2021-06-10T10:31:26
| 2021-06-10T10:31:26
| 328,963,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,866
|
py
|
import smtplib
import datetime as dt
import random
# Change the receiver email here
EMAIL_TO = ""
# Change your email and password here
EMAIL_FROM = ""
PASSWORD = ""
now = dt.datetime.now()
day_of_week = now.weekday()
if day_of_week == 0:
with open("quotes.txt", "r") as f:
quotes = f.read().splitlines()
rand_quote = random.choice(quotes)
with smtplib.SMTP("smtp.gmail.com") as connection:
connection.starttls()
connection.login(user=EMAIL_FROM, password=PASSWORD)
connection.sendmail(
from_addr=EMAIL_FROM,
to_addrs=EMAIL_TO,
msg=f"Subject:Monday Motivation\n\n{rand_quote}".encode('ascii', errors='ignore')
)
# import smtplib
# # Change the receiver email here
# email_to = ""
#
# # Change your email and password here
# email_from = ""
# password = ""
#
# with smtplib.SMTP("smtp.gmail.com") as connection:
# connection.starttls()
# connection.login(user=email_from, password=password)
# connection.sendmail(
# from_addr=email_from,
# to_addrs=email_to,
# msg="Subject:Hello\n\nThis is the body of my email."
# )
# # Change the receiver email here
# email_to = ""
#
# # Change your email and password here
# email_from = ""
# password = ""
#
# with smtplib.SMTP("smtp.mail.yahoo.com", port=587) as connection:
# connection.starttls()
# connection.login(user=email_from, password=password)
# connection.sendmail(
# from_addr=email_from,
# to_addrs=email_to,
# msg="Subject:Hello\n\nThis is the body of my email."
# )
# import datetime as dt
#
# now = dt.datetime.now()
# year = now.year
# month = now.month
# day_of_week = now.weekday()
# print(now)
# print(year)
# print(month)
# print(day_of_week)
#
# date_of_birth = dt.datetime(year=1985, month=11, day=2)
# print(date_of_birth)
|
[
"macosta@student.42.fr"
] |
macosta@student.42.fr
|
878270727d02763ab73a0b3dbe4846b3a1b33115
|
f72c9e46af5ce5ac738693daf65e67a0962a229a
|
/sdk/lusid/models/entity_identifier.py
|
9729ad91a9e7ee0e804c5cd8bde01f77b68ab38b
|
[
"MIT"
] |
permissive
|
finbourne/lusid-sdk-python
|
db8ce602f8408169f6583783c80ebbef83c77807
|
32fedc00ce5a37a6fe3bd9b9962570a8a9348e48
|
refs/heads/master
| 2023-08-29T18:22:49.488811
| 2023-08-29T15:57:26
| 2023-08-29T15:57:26
| 125,082,278
| 11
| 11
|
NOASSERTION
| 2023-04-28T07:16:48
| 2018-03-13T16:31:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,715
|
py
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 1.0.463
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class EntityIdentifier(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'identifier_scope': 'str',
'identifier_type': 'str',
'identifier_value': 'str'
}
attribute_map = {
'identifier_scope': 'identifierScope',
'identifier_type': 'identifierType',
'identifier_value': 'identifierValue'
}
required_map = {
'identifier_scope': 'optional',
'identifier_type': 'required',
'identifier_value': 'required'
}
def __init__(self, identifier_scope=None, identifier_type=None, identifier_value=None, local_vars_configuration=None): # noqa: E501
"""EntityIdentifier - a model defined in OpenAPI"
:param identifier_scope: The scope of the identifier
:type identifier_scope: str
:param identifier_type: The type of the identifier (required)
:type identifier_type: str
:param identifier_value: The value of the identifier (required)
:type identifier_value: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._identifier_scope = None
self._identifier_type = None
self._identifier_value = None
self.discriminator = None
self.identifier_scope = identifier_scope
self.identifier_type = identifier_type
self.identifier_value = identifier_value
@property
def identifier_scope(self):
"""Gets the identifier_scope of this EntityIdentifier. # noqa: E501
The scope of the identifier # noqa: E501
:return: The identifier_scope of this EntityIdentifier. # noqa: E501
:rtype: str
"""
return self._identifier_scope
@identifier_scope.setter
def identifier_scope(self, identifier_scope):
"""Sets the identifier_scope of this EntityIdentifier.
The scope of the identifier # noqa: E501
:param identifier_scope: The identifier_scope of this EntityIdentifier. # noqa: E501
:type identifier_scope: str
"""
self._identifier_scope = identifier_scope
@property
def identifier_type(self):
"""Gets the identifier_type of this EntityIdentifier. # noqa: E501
The type of the identifier # noqa: E501
:return: The identifier_type of this EntityIdentifier. # noqa: E501
:rtype: str
"""
return self._identifier_type
@identifier_type.setter
def identifier_type(self, identifier_type):
"""Sets the identifier_type of this EntityIdentifier.
The type of the identifier # noqa: E501
:param identifier_type: The identifier_type of this EntityIdentifier. # noqa: E501
:type identifier_type: str
"""
if self.local_vars_configuration.client_side_validation and identifier_type is None: # noqa: E501
raise ValueError("Invalid value for `identifier_type`, must not be `None`") # noqa: E501
self._identifier_type = identifier_type
@property
def identifier_value(self):
"""Gets the identifier_value of this EntityIdentifier. # noqa: E501
The value of the identifier # noqa: E501
:return: The identifier_value of this EntityIdentifier. # noqa: E501
:rtype: str
"""
return self._identifier_value
@identifier_value.setter
def identifier_value(self, identifier_value):
"""Sets the identifier_value of this EntityIdentifier.
The value of the identifier # noqa: E501
:param identifier_value: The identifier_value of this EntityIdentifier. # noqa: E501
:type identifier_value: str
"""
if self.local_vars_configuration.client_side_validation and identifier_value is None: # noqa: E501
raise ValueError("Invalid value for `identifier_value`, must not be `None`") # noqa: E501
self._identifier_value = identifier_value
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EntityIdentifier):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EntityIdentifier):
return True
return self.to_dict() != other.to_dict()
|
[
"concourse@finbourne.com"
] |
concourse@finbourne.com
|
d1e45646f104e957c2362515a6b771fbac434c43
|
397e125e94f4f139f2bf5055824d81f24b8b1757
|
/企業コン/M-SOLUTIONS プロコンオープン/D.py
|
4be09b400bf8143978a04bfb0cf22cda607a01e7
|
[] |
no_license
|
tails1434/Atcoder
|
ecbab6ee238e3f225551297db961b1b502841fa4
|
e7c7fed36be46bbaaf020a70997842240ba98d62
|
refs/heads/master
| 2021-07-07T00:31:49.235625
| 2020-09-30T01:42:01
| 2020-09-30T01:42:01
| 189,009,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
from collections import deque
def main():
N = int(input())
edge = [[] for _ in range(N)]
for _ in range(N-1):
a, b = map(int, input().split())
a -= 1
b -= 1
edge[a].append(b)
edge[b].append(a)
C = list(map(int, input().split()))
cnt = [0] * N
visited = [False] * N
Q = deque([0])
while Q:
q = Q.popleft()
visited[q] = True
for v in edge[q]:
if q == v:
continue
if visited[v]:
continue
cnt[q] += 1
Q.append(v)
tmp = []
for i in range(N):
tmp.append((cnt[i],i))
tmp.sort()
C.sort()
d = [0] * N
for i in range(N):
d[tmp[i][1]] = C[i]
print(sum(C) - max(C))
print(*d)
if __name__ == "__main__":
main()
|
[
"sososo1333@gmail.com"
] |
sososo1333@gmail.com
|
06b3e54127019eed2360e8eabf390410d6381fe5
|
58c03f5db2e969dd0a982cd95f28b2ad0ecd5651
|
/setup.py
|
685cb7ed33ff9949436f3716c5180e5da4c59c4d
|
[] |
no_license
|
rajsid1/django-th
|
6bccaa25652d0713f893b9ba533f496cc6fe67f8
|
8060df3803ea0f3d1c965c3b9fdf85628633975b
|
refs/heads/master
| 2021-01-15T09:29:14.177440
| 2014-11-30T21:26:40
| 2014-11-30T21:26:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
from setuptools import setup, find_packages
from django_th import __version__ as version
import os
def strip_comments(l):
return l.split('#', 1)[0].strip()
def reqs(*f):
return list(filter(None, [strip_comments(l) for l in open(
os.path.join(os.getcwd(), *f)).readlines()]))
install_requires = reqs('requirements.txt')
setup(
name='django_th',
version=version,
description='Trigger Happy - micro enterprise service bus (ESB) - a bridge between your internet services',
author='Olivier Demah',
author_email='olivier@foxmask.info',
url='https://github.com/foxmask/django-th',
download_url="https://github.com/foxmask/django-th/archive/trigger-happy-"
+ version + ".zip",
packages=find_packages(exclude=['django_th/local_settings']),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
install_requires=install_requires,
include_package_data=True,
)
|
[
"olivier@foxmask.info"
] |
olivier@foxmask.info
|
13306b3ec4618698ff1c402f46e0766579943674
|
9ef7e97ea83ec9dc1f792ef143aaa86c6e626ea6
|
/Laplaceequation/.svn/pristine/cd/cd0a97d4dd9b4a0d2ab02c13a884e55158c95ed1.svn-base
|
79c9fe07487cfe58b407e081b80e52478090c10a
|
[] |
no_license
|
leiradom/CFDhomework
|
b57b0644ad76f61458c5d54e3c023615b64da146
|
b59eae622e68f1127e65f3846cf8075ba82849f3
|
refs/heads/main
| 2023-05-30T19:35:30.088666
| 2021-06-04T14:47:47
| 2021-06-04T14:47:47
| 373,870,890
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,847
|
import math
import numpy as np
import matplotlib.pyplot as plt
# n1 = int(input("请输入翼型表面离散点数"))
# n2 = int(input("请输入近场到远场离散点数"))
#NACA0012表面坐标
#y1 = 0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
#y2 = -0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
#沿x轴等间距变化翼型表面,这样不好
'''
for i in range (0,101):
x = 0.01 * i
y1 = 0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
y2 = -0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
X[i] = x
Y1[i] = y1
Y2[i] = y2
plt.plot(X,Y1,'-o')
plt.plot(X,Y2,'-o')
plt.show()
'''
#定义常数
n1 = 40 #绕翼型一周的点数
n2 = 80 #从壁面到远场的离散点数
e1 = 0.1
e2 = 0.1
e0 = 1e-4 #设定收敛残值
dcx = 1 #求解域的横坐标离散
dcy = 1 #求解域的纵坐标离散
a = 0
b = 0
g = 0
#这里用linspace
Theta = np.linspace(0,2*math.pi,num = n1)
X0 = np.ones(n1)
Y0 = np.ones(n1)
X1 = np.ones(n1)
Y1 = np.ones(n1)
X = np.ones([n1,n2]) #存放所有点的横坐标
Y = np.ones([n1,n2]) #c
x2 = np.ones([n1,n2]) #迭代用
y2 = np.ones([n1,n2]) #迭代用
#-----------------------生成网格初值-------------------#
for i in range(0,n1):
X0[i] = 0.5 * (1 + math.cos(Theta[i]))
if(Theta[i]<math.pi):
y = 0.6 * (-0.1015 * X0[i]**4 + 0.2843 * X0[i]**3 - 0.3576 * X0[i]**2 - 0.1221 * X0[i] + 0.2969 * X0[i] ** 0.5)
else:
y = -0.6 * (-0.1015 * X0[i]**4 + 0.2843 * X0[i]**3 - 0.3576 * X0[i]**2 - 0.1221 * X0[i] + 0.2969 * X0[i] ** 0.5)
Y0[i] = y
X1[i] = 30 * math.cos(Theta[i]) + 0.5
Y1[i] = 30 * math.sin(Theta[i])
#print(X0[i],Y0[i])
for j in range(0,n2):
dx = (X1[i] - X0[i]) / (n2 - 1)
dy = (Y1[i] - Y0[i]) / (n2 - 1)
X[i][j] = X0[i] + j * dx
Y[i][j] = Y0[i] + j * dy
'''
#显示初始网格
for i in range(0,n1):
plt.plot(X[i,:],Y[i,:],'-o',c='b')
for j in range(0,n2):
plt.plot(X[:,j],Y[:,j],'-o',c='b')
plt.axis('square') #等比例显示
plt.show() #放在最外面,最后只画一张图
'''
#-----------------------生成网格初值-------------------#
#迭代解Laplace方程生成网格
while((e1 > e0) or (e2 > e0)):
for i in range(0,n1):
for j in range(1,n2-1):
if(i == 0):
a = ((X[i][j+1]-X[i][j-1]) / (2*dcy))**2 + ((Y[i][j+1]-Y[i][j-1]) / (2*dcy))**2 #alpha
b = ((X[i+1][j]-X[n1-1][j]) / (2*dcx)) * ((X[i][j+1] - X[i][j-1]) / (2*dcy)) \
+ ((Y[i+1][j]-Y[n1-1][j]) / (2*dcx))*((Y[i][j+1] - Y[i][j-1]) / (2*dcy)) #beta
g = ((X[i+1][j]-X[n1-1][j]) / (2*dcx))**2 + ((Y[i+1][j]-Y[n1-1][j]) / (2*dcx))**2 #gamma
#开始迭代
x2[i][j] = 0.5 * (a * (X[i+1][j] + X[n1-1][j]) + g * (X[i][j+1] + X[i][j-1]) \
- 0.5 * b * (X[i+1][j+1]+X[n1-1][j-1]-X[i+1][j-1]-X[n1-1][j+1])) / (a + g)
y2[i][j] = 0.5 * (a * (Y[i+1][j] + Y[n1-1][j]) + g * (Y[i][j+1] + Y[i][j-1]) \
- 0.5 * b * (Y[i+1][j+1]+ Y[n1-1][j-1]- Y[i+1][j-1]- Y[n1-1][j+1])) / (a + g)
e1 = abs(X[i][j] - x2[i][j])
e2 = abs(Y[i][j] - y2[i][j])
X[i][j] = x2[i][j] #更新迭代的x坐标
Y[i][j] = y2[i][j] #更新迭代的y坐标
#迭代方程中的三个系数,分别为一阶导数关系
else:
a = ((X[i][j+1]-X[i][j-1]) / (2*dcy))**2 + ((Y[i][j+1]-Y[i][j-1]) / (2*dcy))**2 #alpha
b = ((X[i+1][j]-X[i-1][j]) / (2*dcx)) * ((X[i][j+1] - X[i][j-1]) / (2*dcy)) \
+ ((Y[i+1][j]-Y[i-1][j]) / (2*dcx))*((Y[i][j+1] - Y[i][j-1]) / (2*dcy)) #beta
g = ((X[i+1][j]-X[i-1][j]) / (2*dcx))**2 + ((Y[i+1][j]-Y[i-1][j]) / (2*dcx))**2 #gamma
#开始迭代
x2[i][j] = 0.5 * (a * (X[i+1][j] + X[i-1][j]) + g * (X[i][j+1] + X[i][j-1]) \
- 0.5 * b * (X[i+1][j+1]+X[i-1][j-1]-X[i+1][j-1]-X[i-1][j+1])) / (a + g)
y2[i][j] = 0.5 * (a * (Y[i+1][j] + Y[i-1][j]) + g * (Y[i][j+1] + Y[i][j-1]) \
- 0.5 * b * (Y[i+1][j+1]+ Y[i-1][j-1]- Y[i+1][j-1]- Y[i-1][j+1])) / (a + g)
e1 = abs(X[i][j] - x2[i][j])
e2 = abs(Y[i][j] - y2[i][j])
X[i][j] = x2[i][j] #更新迭代的x坐标
Y[i][j] = y2[i][j] #更新迭代的y坐标
#最终绘图
for i in range(0,n1):
plt.plot(X[i,:],Y[i,:],c='b')
for j in range(0,n2):
plt.plot(X[:,j],Y[:,j],c='b')
plt.axis('square') #等比例显示
plt.show() #放在最外面,最后只画一张图
|
[
"1637232256@qq.com"
] |
1637232256@qq.com
|
|
72359d15c10f05c12783657d8a41809cb92f774b
|
21c09799d006ed6bede4123d57d6d54d977c0b63
|
/PrintOut.py
|
31c51a8b387d5270f2d155b82ae290929209aa36
|
[] |
no_license
|
corvettettt/DijetRootTreeAnalyzer
|
68cb12e6b280957e1eb22c9842b0b9b30ae2c779
|
e65624ffc105798209436fc80fb82e2c252c6344
|
refs/heads/master
| 2021-05-06T09:57:12.816787
| 2019-04-18T15:32:38
| 2019-04-18T15:32:38
| 114,043,763
| 1
| 0
| null | 2017-12-12T22:02:46
| 2017-12-12T22:02:46
| null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
import os
#for i in [0.1,0.15,0.2,0.3,0.35,0.45,0.5,0.5803,0.65,0.75,0.8,0.8838,0.9,0.9693]:
# cut = str(int(i*1000))
# os.system('python python/Plot1DLimit.py -d cards_PF0bDijet2017Scan'+cut+'_scan_ns/ -m qq -b PF0bDijet2017Scan'+cut+' -l 41.800 --massMin 1000 --massMax 8000 --xsecMin 1e-4 --xsecMax 1e2 -o exp')
for i in ['M','L','T']:
for j in ['1b','le1b']:
tag = 'PFNo1dDijet2017bgDeepJet'+j+i
print '\n\t'+tag
os.system('python python/Plot1DLimit.py -d cards_'+tag+'_scan/ -m qg -b '+tag+' -l 41.800 --massMin 1000 --massMax 8000 --xsecMin 1e-4 --xsecMax 1e2 -o obs')
|
[
"zhixing.wang@ttu.edu"
] |
zhixing.wang@ttu.edu
|
d09077a77e985ee9243a979fe56958da14e954bd
|
4101f575c913a1ccbfcbe16314fb343f8ddb2ea2
|
/27.Linked_List_Cycle_II/27.Linked_List_Cycle_II.py
|
346eea4bfbdb108a6398dbfee8fa4ca78f82cf83
|
[] |
no_license
|
Gangadharbhuvan/31-Days-October-Leetcode
|
a5959b25202d847daeb0f8ddc696838b068b01dc
|
8515aa8cfd072b61f7b00be267f96c688474021b
|
refs/heads/master
| 2023-01-04T01:28:15.974195
| 2020-10-31T17:06:56
| 2020-10-31T17:06:56
| 300,222,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
'''
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
Notice that you should not modify the linked list.
Follow up:
Can you solve it using O(1) (i.e. constant) memory?
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: tail connects to node index 1
Explanation: There is a cycle in the linked list, where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: tail connects to node index 0
Explanation: There is a cycle in the linked list, where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: no cycle
Explanation: There is no cycle in the linked list.
Constraints:
The number of the nodes in the list is in the range [0, 104].
-105 <= Node.val <= 105
pos is -1 or a valid index in the linked-list.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def detectCycle(self, head: ListNode) -> ListNode:
slow = fast = head
while fast and fast.next:
slow, fast = slow.next, fast.next.next
if slow == fast: break
if not fast or not fast.next: return
slow = head
while slow != fast:
slow, fast = slow.next, fast.next
return slow
|
[
"noreply@github.com"
] |
Gangadharbhuvan.noreply@github.com
|
d820e8e0ac93df5fd014839584386acd0509363e
|
abbc2d332bdfa036ac12438983e6d74cf4107e64
|
/SiamFCpp/SiamFCpp-video_analyst/siamfcpp/evaluation/got_benchmark/experiments/trackingnet.py
|
8492e40ea56be71f3e098bb843cac2ed8f449c58
|
[] |
permissive
|
HonglinChu/SiamTrackers
|
c494cff7543a433e8ec7dbf6d9439b1e7395b0c0
|
805208b5348346d35e64abcbe901a3829743e157
|
refs/heads/master
| 2023-08-29T06:50:59.532271
| 2023-03-06T09:13:53
| 2023-03-06T09:13:53
| 253,718,080
| 1,166
| 243
|
Apache-2.0
| 2023-08-03T16:39:53
| 2020-04-07T07:24:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,041
|
py
|
from __future__ import absolute_import
import os
import numpy as np
from loguru import logger
from .otb import ExperimentOTB
from ..datasets import TrackingNet
from ..utils.metrics import rect_iou, center_error
from ..utils.ioutils import compress
class ExperimentTrackingNet(ExperimentOTB):
r"""Experiment pipeline and evaluation toolkit for TrackingNet dataset.
Only the TEST subset part implemented.
Args:
root_dir (string): Root directory of LaSOT dataset.
subset (string, optional): Specify ``train`` or ``test`` or ``train0,1,...``
subset of TrackingNet. Default is ``test``.
return_meta (bool, optional): whether to fetch meta info
(occlusion or out-of-view). Default is ``False``.
result_dir (string, optional): Directory for storing tracking
results. Default is ``./results``.
report_dir (string, optional): Directory for storing performance
evaluation results. Default is ``./reports``.
"""
def __init__(self, root_dir, subset='test', return_meta=False,
result_dir='results', report_dir='reports'):
# assert subset.upper() in ['TRAIN', 'TEST']
assert subset.startswith(('train', 'test')), 'Unknown subset.'
self.subset = subset
self.dataset = TrackingNet(root_dir, subset, return_meta=return_meta)
self.result_dir = os.path.join(result_dir, 'TrackingNet')
self.report_dir = os.path.join(report_dir, 'TrackingNet')
# as nbins_iou increases, the success score
# converges to the average overlap (AO)
self.nbins_iou = 21
self.nbins_ce = 51
def report(self, tracker_names, *args, plot_curves=True, **kwargs):
if self.subset == 'test':
logger.info("TEST subset's annotations are withholded, generate submission file instead...")
for tracker_name in tracker_names:
# compress all tracking results
result_dir = os.path.join(self.result_dir, tracker_name)
save_file = result_dir
compress(result_dir, save_file)
print('Records saved at', save_file + '.zip')
# print submission guides
print('\033[93mLogin and follow instructions on')
print('http://eval.tracking-net.org/')
print('to upload and evaluate your tracking results\033[0m')
performance = None
else:
performance = super(ExperimentTrackingNet, self).report(tracker_names, *args, plot_curves=plot_curves, **kwargs)
return performance
# def _calc_metrics(self, boxes, anno):
# valid = ~np.any(np.isnan(anno), axis=1)
# if len(valid) == 0:
# print('Warning: no valid annotations')
# return None, None
# else:
# ious = rect_iou(boxes[valid, :], anno[valid, :])
# center_errors = center_error(
# boxes[valid, :], anno[valid, :])
# return ious, center_errors
|
[
"chuhonglin@chuhonglindeMacBook-Pro.local"
] |
chuhonglin@chuhonglindeMacBook-Pro.local
|
d8b5ea5c66a9b6e027e0b4f8aa070b5bbbeb9024
|
8938c94953e30aefebc28a9321da16976345072f
|
/bound_detect.py
|
ffc1a50cbf77fb1c2ccb2a3d6ebc7d75da3a25e6
|
[] |
no_license
|
liu1073811240/Opencv-4
|
ace1df112bf1ba58620086a32237fe0e08b914bc
|
afc4a676f1fb0f6c47a9bb1ae039791ef6e77467
|
refs/heads/master
| 2023-01-12T00:21:35.171391
| 2020-11-15T03:30:04
| 2020-11-15T03:30:04
| 312,950,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
import cv2
import numpy as np

# Boundary detection demo: axis-aligned bounding rectangle, minimum-area
# (rotated) rectangle, minimum enclosing circle, and ellipse fitting
# around the first detected contour of a thresholded image.

img = cv2.imread("./images/23.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Axis-aligned bounding rectangle
x, y, w, h = cv2.boundingRect(contours[0])  # bounding-box coordinates derived from the contour points
img_contour = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("img_contour", img_contour)

# Minimum-area (rotated) rectangle
rect = cv2.minAreaRect(contours[0])  # ((center_x, center_y), (width, height), rotation angle)
print(rect)
box = cv2.boxPoints(rect)  # the 4 corner points of the rotated rectangle
print(box)
print(box.dtype, box.shape)
box = np.int32(box)  # drawContours requires integer vertex coordinates
print(box.dtype, box.shape)
img_contour1 = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
cv2.imshow("img_contour1", img_contour1)

# Minimum enclosing circle
(x, y),radius = cv2.minEnclosingCircle(contours[0])  # center and radius of the smallest circle containing the contour
center = (int(x), int(y))
radius = int(radius)
img_contour3 = cv2.circle(img, center, radius, (255, 0, 0), 2)
cv2.imshow("img_contour3", img_contour3)

# Ellipse fitting
ellipse = cv2.fitEllipse(contours[0])  # best-fit ellipse for the contour
print(ellipse)
img_contour4 = cv2.ellipse(img, ellipse, (0, 255, 255), 2)
cv2.imshow("img_contour4", img_contour4)

cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"1073811240@qq.com"
] |
1073811240@qq.com
|
0dd50d721f6ae31735f10048c04f2c5ae82ff43a
|
936a8929956eb82b08c8f48c9bf2e7fb621412df
|
/dlaicourse/course2_part6_lesson3.py
|
3192b2c2042036998a334d61bec0d6b33545bcd5
|
[] |
no_license
|
chenxu0602/TensorFlow2.0
|
3871b9b0cd391ca65938da7d3409e4667acb929c
|
c13742750ad09d8ec26fabe32578d27c927701a8
|
refs/heads/master
| 2021-07-12T11:10:08.665940
| 2021-02-27T09:21:29
| 2021-02-27T09:21:29
| 237,641,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
import os

import tensorflow as tf

# Build InceptionV3 without its classification head; weights=None skips the
# automatic download so the locally stored ImageNet weights can be loaded.
local_weights_file = 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'

pre_trained_model = tf.keras.applications.inception_v3.InceptionV3(
    input_shape=(150, 150, 3),
    include_top=False,
    weights=None)

pre_trained_model.load_weights(local_weights_file)

# NOTE(review): trainable=True fine-tunes every Inception layer.  Typical
# transfer-learning setups freeze the base (trainable=False) — confirm intent.
for layer in pre_trained_model.layers:
    layer.trainable = True

# Use the feature maps at the 'mixed7' layer as the base model output.
last_layer = pre_trained_model.get_layer("mixed7")
print("last year output shape: ", last_layer.output_shape)  # NOTE(review): message typo, "layer"
last_output = last_layer.output

# Classification head: flatten -> dense -> dropout -> single sigmoid unit
# (binary cats-vs-dogs classification).
x = tf.keras.layers.Flatten()(last_output)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Dense(1, activation="sigmoid")(x)

model = tf.keras.Model(pre_trained_model.input, x)
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.001),
              loss="binary_crossentropy",
              metrics=["accuracy"])

from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile

# local_zip = 'cats_and_dogs_filtered.zip'
#
# zip_ref = zipfile.ZipFile(local_zip, 'r')
#
# zip_ref.extractall()
# zip_ref.close()

# Define our example directories and files
base_dir = 'cats_and_dogs_filtered'

train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')  # Directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')  # Directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')  # Directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # Directory with our validation dog pictures

train_cat_fnames = os.listdir(train_cats_dir)
train_dog_fnames = os.listdir(train_dogs_dir)

# Augment the training images; validation data is only rescaled.
train_datagen = ImageDataGenerator(rescale=1/255.,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1/255.)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size=20,
                                                    class_mode="binary",
                                                    target_size=(150, 150))

validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        batch_size=20,
                                                        class_mode="binary",
                                                        target_size=(150, 150))

history = model.fit(
    train_generator,
    validation_data=validation_generator,
    steps_per_epoch=100,
    epochs=20,
    validation_steps=50,
    verbose=2)

import matplotlib.pyplot as plt

# Plot training-vs-validation accuracy per epoch.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()

plt.show()
|
[
"chenxu@Chens-iMac.local"
] |
chenxu@Chens-iMac.local
|
00440d45b3060c0984ce7edc9e4ac40220ccd1bb
|
5ff8cefa68d52d2427bb3d35320cd8bd0d072968
|
/Python/function.py
|
b37cd2866485f3b2690cac066f765b069d08c364
|
[] |
no_license
|
gsudarshan1990/PythonSampleProjects
|
a65a111454f8dc551f1cd29901cead0798ad6dc3
|
3c1a5174c5f966b0eed2828221add76ec0d019d5
|
refs/heads/master
| 2020-05-09T16:02:37.743568
| 2019-07-14T06:22:55
| 2019-07-14T06:22:55
| 181,255,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
def say_hi():
    """Print a fixed welcome message."""
    greeting = 'Hi! Welcome to python functions'
    print(greeting)
def program_to_add_two_numbers():
    """Prompt for two values, add them as integers, and print the sum."""
    first = input('Enter the first value')
    second = input('Enter the second value')
    total = int(first) + int(second)
    print(total)
def say_hi_from_command_line(name):
    # NOTE(review): this definition is immediately shadowed by the
    # re-definition with a default argument directly below it; one of
    # the two should be removed.
    print('Hello {} how are you'.format(name))
def say_hi_from_command_line(name='sudarshan'):
    """Greet *name*, defaulting to 'sudarshan' when none is supplied."""
    message = 'Hello {} how are you'.format(name)
    print(message)
def say_hi_multiple_arguments(firstname, lastname):
    """Greet a person by first and last name."""
    message = 'Hello {} {}. How are you'.format(firstname, lastname)
    print(message)
say_hi_multiple_arguments('sudarshan','Govindarajan')
def Check_even_or_odd(number):
    """Return 'EVEN' when *number* is divisible by 2, otherwise 'ODD'."""
    return 'EVEN' if number % 2 == 0 else 'ODD'
print(Check_even_or_odd(8))
|
[
"sudharshan.govindarajan@ey.com"
] |
sudharshan.govindarajan@ey.com
|
98646c1825664b848ab78bddca0a52fea3d4f37a
|
446b36ebe2eae156fbac7dcf1c50d467bd8bda93
|
/artellapipe/__version__.py
|
059e18e19b275b625118ed020ba630246c2024ef
|
[
"MIT"
] |
permissive
|
ArtellaPipe/artellapipe
|
cd08f29d4ac8ca0eb304f944864632c1f98f81cb
|
3400f6a55f124f639143fe01c559059eaba23b22
|
refs/heads/master
| 2023-04-06T13:08:34.445823
| 2021-01-29T22:52:23
| 2021-01-29T22:52:23
| 197,077,090
| 8
| 0
|
MIT
| 2023-03-21T22:35:32
| 2019-07-15T21:48:38
|
Python
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Version module for artellapipe
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
__version__ = None
def get_version():
    """Return the package version, computing and caching it on first use."""
    global __version__
    if not __version__:
        # Lazily resolved via versioneer's generated _version module.
        from ._version import get_versions
        __version__ = get_versions()['version']
        del get_versions
    return __version__
|
[
"tpovedatd@gmail.com"
] |
tpovedatd@gmail.com
|
4742e8ffbe6d84c6a4f32e83697808989bfb9c8c
|
cd80ef21610010b534430cc9de5c47cddc8b351c
|
/tests/test_utils/test_load_raw_schema.py
|
d2e58a61b20637fa70e8e744949bf61779f4f7af
|
[
"CC0-1.0"
] |
permissive
|
lushacao/biolinkml
|
7124437b0fc6cd2fb8e84fa50e5e187513693fb0
|
a492ec8e0d5dc464407b25a70d363674131155bf
|
refs/heads/master
| 2020-05-26T15:13:31.416209
| 2019-04-09T14:53:55
| 2019-04-09T14:53:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,482
|
py
|
import os
import unittest
from typing import Callable
from jsonasobj import as_json, loads, as_dict
from biolinkml import METAMODEL_URI
from biolinkml.meta import SchemaDefinition
from biolinkml.utils.rawloader import load_raw_schema
from biolinkml.utils.schemaloader import SchemaLoader
from tests.test_utils import datadir
class RawLoaderTestCase(unittest.TestCase):
    """Tests for biolinkml.utils.rawloader.load_raw_schema."""

    def _verify_schema1_content(self, schema: SchemaDefinition, source_file,
                                addl_checks: Callable[[SchemaDefinition], None]=None) -> None:
        # Template of the expected schema.  The volatile fields below
        # (metamodel version, source file date/size, generation date) are
        # type-checked and then copied from the actual schema before the
        # final equality comparison, so only stable content is asserted.
        expected = loads(f"""{{
   "name": "{source_file}",
   "id": "http://example.org/{source_file}",
   "title": "Load Raw Schema Test",
   "metamodel_version": "0.5.0",
   "source_file": "{source_file}.yaml",
   "source_file_date": "Mon Dec 31 11:25:38 2018",
   "source_file_size": 76,
   "generation_date": "2018-12-31 11:50"
}}""")
        schema.source_file = os.path.basename(schema.source_file)
        if addl_checks:
            addl_checks(schema)

        self.assertTrue(isinstance(schema.metamodel_version, str))
        expected.metamodel_version = schema.metamodel_version
        self.assertTrue(isinstance(schema.source_file_date, str))
        expected.source_file_date = schema.source_file_date
        self.assertTrue(isinstance(schema.source_file_size, int))
        expected.source_file_size = schema.source_file_size
        self.assertTrue(isinstance(schema.generation_date, str))
        expected.generation_date = schema.generation_date

        self.assertEqual(expected, loads(as_json(schema)))

    def test_load_raw_file(self):
        """ Test loading a data file """
        self._verify_schema1_content(load_raw_schema(os.path.join(datadir, 'schema1.yaml')), 'schema1')
        # Verify that we can't pass source_file parameters when we've got a directory name
        with self.assertRaises(AssertionError):
            load_raw_schema(os.path.join(datadir, 'schema1.yaml'), source_file_size=117)

    def test_explicit_name(self):
        """ Test the named schema option """
        self._verify_schema1_content(load_raw_schema(os.path.join(datadir, 'schema2.yaml')), 'schema2')

    def test_multi_schemas(self):
        """ Test multiple schemas in the same file """
        def check_types(s: SchemaDefinition) -> None:
            # The two types come from the two schemas in the file.
            self.assertEqual({
                'integer': {'base': 'int',
                            'from_schema': 'http://example.org/schema5',
                            'name': 'integer'},
                'string': {'base': 'str',
                           'from_schema': 'http://example.org/schema4',
                           'name': 'string'}},
                {k: as_dict(loads(as_json(v))) for k, v in s.types.items()})
            # Clear the types so the shared schema1 comparison still matches.
            s.types = None

        self._verify_schema1_content(load_raw_schema(os.path.join(datadir, 'schema4.yaml')), 'schema4', check_types)

    def test_base_dir(self):
        """ Test the base directory option """
        self._verify_schema1_content(load_raw_schema('schema1.yaml', base_dir=datadir), 'schema1')

    def test_schema_id(self):
        """ Test loading a schema with just an id """
        self._verify_schema1_content(load_raw_schema('schema3.yaml', base_dir=datadir), 'schema3')

    def test_name_from_sourcefile(self):
        """ Test no identifier at all """
        with self.assertRaises(ValueError):
            load_raw_schema(os.path.join(datadir, 'schema5.yaml'))

    def test_load_text(self):
        """ Test loading straight text """
        with open(os.path.join(datadir, 'schema1.yaml')) as f:
            self._verify_schema1_content(load_raw_schema(f.read(), 'schema1.yaml', "Mon Dec 31 11:25:38 2018", 76),
                                         'schema1')

    def test_representation_errors(self):
        """ Test misformed schema elements """
        # Each typeerror*.yaml fixture contains a differently malformed
        # element; every one of them must be rejected with a ValueError.
        fn = os.path.join(datadir, 'typeerror1.yaml')
        with self.assertRaises(ValueError):
            SchemaLoader(fn)
        fn = os.path.join(datadir, 'typeerror2.yaml')
        with self.assertRaises(ValueError):
            SchemaLoader(fn)
        fn = os.path.join(datadir, 'typeerror3.yaml')
        with self.assertRaises(ValueError):
            SchemaLoader(fn)
        fn = os.path.join(datadir, 'typeerror4.yaml')
        with self.assertRaises(ValueError):
            SchemaLoader(fn)
if __name__ == '__main__':
unittest.main()
|
[
"solbrig@jhu.edu"
] |
solbrig@jhu.edu
|
1d46722876803893a9031d256552fb3ff8871627
|
b92417413ec5b05ca25695de55934ce7072a0f0a
|
/test/test_v1_source_control_user.py
|
272e12fdb5ccbc057c74caf86bff1ed08f2c5e16
|
[
"Apache-2.0"
] |
permissive
|
detiber/lib_openshift
|
be1f0f1b3eec62c9bbf50a3fcea61303a870c112
|
efea21ce6f67e3d48885c03ae22978c576c0b87d
|
refs/heads/master
| 2021-01-18T04:12:00.820052
| 2016-10-04T03:20:43
| 2016-10-04T03:20:43
| 63,102,761
| 0
| 0
| null | 2016-07-11T21:15:36
| 2016-07-11T21:15:36
| null |
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_source_control_user import V1SourceControlUser
class TestV1SourceControlUser(unittest.TestCase):
    """Unit test stubs for the generated V1SourceControlUser model."""

    def setUp(self):
        # No fixtures are needed for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1SourceControlUser(self):
        """Construct a V1SourceControlUser to verify the model instantiates."""
        model = lib_openshift.models.v1_source_control_user.V1SourceControlUser()
if __name__ == '__main__':
unittest.main()
|
[
"jdetiber@redhat.com"
] |
jdetiber@redhat.com
|
15ed6ca2de5e81a647689b26bfa84a582b393614
|
8a699595e7f156b1ade42f6042900b3331831fbf
|
/src/transformers/models/funnel/__init__.py
|
b9c6b9608d3787ef46211639297da693cde0c8c9
|
[
"Apache-2.0"
] |
permissive
|
stas00/transformers
|
ab654371a387c5883fc882dd0286177875d6d3b4
|
7c5d79912a21880ce13d77881940458e90d98917
|
refs/heads/master
| 2023-02-16T00:22:41.298155
| 2022-04-08T20:55:42
| 2022-04-08T20:55:42
| 278,214,696
| 6
| 0
|
Apache-2.0
| 2022-01-28T18:39:00
| 2020-07-08T23:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,496
|
py
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available
# Maps each funnel submodule to the public names it defines.  This drives
# the _LazyModule installed below, so heavy backends (PyTorch / TensorFlow)
# are imported only when one of their symbols is actually accessed.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Optional submodules, registered only when their backend is installed.
if is_tokenizers_available():
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

if is_torch_available():
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the module object
# is replaced by a lazy proxy instead (see the else branch).
if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    if is_tokenizers_available():
        from .tokenization_funnel_fast import FunnelTokenizerFast

    if is_torch_available():
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    if is_tf_available():
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy loader that imports each submodule
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
[
"noreply@github.com"
] |
stas00.noreply@github.com
|
5441121600dc115f36870bfe9ba621150d9a807b
|
be0c6e2071945edcb47ee4f3fadc1f4629a2c6aa
|
/grandapp/migrations/0159_gwascata.py
|
bd3973db6cc1f9639c5721276b1eec0a9fcd0592
|
[] |
no_license
|
QuackenbushLab/grand
|
9719a395e6a30951c3ffdef1eccdb5e422da737c
|
f23031d1f240550d25c2842b4af0aae08c653bae
|
refs/heads/master
| 2023-08-10T09:58:58.381264
| 2023-07-25T18:23:26
| 2023-07-25T18:23:26
| 201,113,575
| 5
| 2
| null | 2022-06-24T19:11:29
| 2019-08-07T19:18:58
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
# Generated by Django 3.0.2 on 2021-04-17 03:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Gwascata model with a term,
    # a (comma-separated?) genelist string, and an integer idd field.
    # NOTE(review): genelist's exact format is not visible here — confirm
    # against the code that populates it.

    dependencies = [
        ('grandapp', '0158_gobpbygene'),
    ]

    operations = [
        migrations.CreateModel(
            name='Gwascata',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('term', models.CharField(max_length=400)),
                ('genelist', models.CharField(max_length=3000)),
                ('idd', models.IntegerField()),
            ],
        ),
    ]
|
[
"marouen.b.guebila@gmail.com"
] |
marouen.b.guebila@gmail.com
|
0b66e9d9f992078632fda7ba4aab15fea059fe55
|
ecf1ce6f8b592f76c7b7c253608c1264ae0676a3
|
/days/day033/movie_logbook.py
|
b380e0535a073660df262734541a8f403ae4d0b4
|
[] |
permissive
|
alex-vegan/100daysofcode-with-python-course
|
94e99880a50ac412e398ad209ed53796f253641f
|
b6c12316abe18274b7963371b8f0ed2fd549ef07
|
refs/heads/master
| 2021-07-20T23:05:59.721661
| 2019-01-21T16:18:25
| 2019-01-21T16:18:25
| 150,115,516
| 0
| 0
|
MIT
| 2018-09-24T14:28:16
| 2018-09-24T14:28:15
| null |
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
from typing import List
import requests
import collections
import os
from urllib.request import urlretrieve
from requests.exceptions import ProxyError, ConnectionError, HTTPError, SSLError
import logbook
import sys
FILE = 'movie_logbook.log'
resp_logger = logbook.Logger('Resp')
url_path = 'http://movie_service.talkpython.fm/api/search/'
Movie = collections.namedtuple('Movie', 'imdb_code, title, director, keywords, '
'duration, genres, rating, year, imdb_score')
def main(file_name=None):
    """Search the movie service for a user-supplied keyword and print hits.

    Parameters
    ----------
    file_name : str | None
        When given, logbook output goes to a timed rotating file of that
        name; otherwise it is streamed to stdout.

    Network and input errors are reported to the user and logged rather
    than propagated.
    """
    level = logbook.TRACE
    if file_name:
        logbook.TimedRotatingFileHandler(file_name, level=level).push_application()
    else:
        logbook.StreamHandler(sys.stdout, level=level).push_application()

    mode = 'stdout' if not file_name else 'file ' + file_name
    msg = f'Logging started. level: {level} mode: {mode}'
    logger = logbook.Logger('Startup')
    logger.notice(msg)

    try:
        keyword = input('Keyword of title search: ')
        if not keyword or not keyword.strip():
            raise ValueError('Must specify a search term.')

        # BUG FIX: os.path.join builds the URL with the OS path separator
        # ('\\' on Windows), producing a broken URL.  Plain concatenation
        # is correct since url_path already ends with '/'.
        url = url_path + keyword
        response = requests.get(url)
        response.raise_for_status()

        if response.status_code == 200:
            print(f"OK!")
            resp_logger.notice(f'Connection correctly made. '
                               f'Status code: {response.status_code}')

        results = response.json()
        # Each hit's fields map directly onto the Movie namedtuple.
        movies = [Movie(**hit) for hit in results.get('hits')]

        print(f'There are {len(movies)} movies found.')
        resp_logger.trace(f'For keyword << {keyword} >> has been found {len(movies)} movies')
        for m in movies:
            print(f"{m.title} with code {m.imdb_code} has score {m.imdb_score}")
    except ProxyError:
        error_msg = (f"Could not connect to proxy. "
                     f"Check your proxy settings.")
        print(f"ERROR: " + error_msg)
        resp_logger.warn(error_msg)
    except ConnectionError:
        error_msg = (f"Could not find server. "
                     f"Check your network connection.")
        print(f"ERROR: " + error_msg)
        resp_logger.warn(error_msg)
    except HTTPError:
        # raise_for_status() ran, so `response` is guaranteed to exist here.
        error_msg = (f"Could not open the HTTP page. "
                     f"Error number {response.status_code} "
                     f"Reason: {response.reason}")
        print("ERROR: " + error_msg)
        resp_logger.warn(error_msg)
    except SSLError:
        error_msg = (f"Could not open the HTTPS page. "
                     f"Check firewall settings and SSL certificates.")
        print(f"ERROR: " + error_msg)
        resp_logger.warn(error_msg)
    except ValueError:
        print(f"ERROR: You must specify a search term.")
        resp_logger.trace(f'Search term has not been specified')
    except Exception as x:
        print(f"Oh that didn't work!: {x}")
        resp_logger.error(f'!!! System Fatality Crash !!!')
if __name__ == '__main__':
main(FILE)
|
[
"alex-vegan@outlook.com"
] |
alex-vegan@outlook.com
|
116ad72261ac13f32e56b96af2857a706496f8f3
|
4eeb40dcc265caf4a2b84bc90a28d481930d6a8a
|
/wristwatchesproj/genericviews/urls.py
|
f85318b0a8fa4ada057cdb8357d59b5e5c64d300
|
[] |
no_license
|
mprasu/Sample-Projects
|
eb7fc46e81b09d7c97c238047e3c93b6fff3fb8d
|
7363baf630900ab2babb4af2afe77911d8a548b2
|
refs/heads/master
| 2020-04-16T06:43:16.345750
| 2019-01-12T07:07:34
| 2019-01-12T07:07:34
| 165,358,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from django.conf.urls import url
from django.urls import path
from genericviews import views
#app_name = 'genericviews'
urlpatterns = [
    # List view at the app root.
    url(r'^$', views.IndexView.as_view(), name='index'),
    # Detail view keyed by primary key.
    url(r'^(?P<pk>[0-9]+)/$', views.DetailsView.as_view(), name='detail'),
    # Entry creation form.
    url(r'^makeentry$', views.MakeEntry.as_view(), name='makeentry'),
    # Delete an item by primary key.
    # NOTE(review): the leading '/' here differs from the other patterns
    # and likely never matches — confirm and drop it if so.
    url(r'^/(?P<pk>[0-9]+)/delete/$',views.DeleteView.as_view(),name='item-delete'),
]
|
[
"muppuriprasanna5@gmail.com"
] |
muppuriprasanna5@gmail.com
|
56f1079c44dd79e3fc140abdf1cdfe15a672ec6e
|
33a917d58f796de42d3f90c7ab0a0353066c8a4b
|
/constants.py
|
20f9cd9dd32e0f3ec6ecab79f4d3222233c424e0
|
[] |
no_license
|
evanthebouncy/tangram
|
d214cec6e164e9b21d83cf2fcacf753d8ca837e5
|
c8a7c13bcad7c9b4c96a6b296db707ddcf32241b
|
refs/heads/master
| 2021-04-26T23:06:12.465094
| 2018-07-18T13:46:00
| 2018-07-18T13:46:00
| 123,930,799
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# all the constant globals shared across codes

# Tangram composition operators — presumably Place / Horizontal / Vertical
# combination ops, matching ACTIONS below; confirm against the model code.
OPS = ['P', 'H', 'V']

# Hidden-layer widths.
n_hidden = 40
# n_hidden = 120 # good embedding bad decomposition
large_hidden = 10 * n_hidden

# assumes L is a board/grid side length — TODO confirm
L = 6

SHAPE_TYPES = ['1', '2', '3', '4', '5']
# SHAPES = ['1', '1', '2', '3', '4', '5', '5']
SHAPES = ['1', '2', '3']
# SHAPES = ['1', '2', '3', '4', '5']
ORIENTATIONS = [1, 2, 3, 4]

# Cartesian product of shape types and orientations.
SXO = [(s,o) for s in SHAPE_TYPES for o in ORIENTATIONS]
# Action space: the two combine ops plus every (shape, orientation) pair.
ACTIONS = ['H', 'V'] + SXO
|
[
"evanthebouncy@gmail.com"
] |
evanthebouncy@gmail.com
|
a06cf4d1ec1fe5c2ef51f4f2e617bb1fda1ec0c7
|
137ba8a70dfcf94dfe7aeef1599341ecc06ca48f
|
/student_result/2018/04_parsing/parsing_13.py
|
485d391f72037db4058ecdbac9b7637466f7058f
|
[] |
no_license
|
smtamh/oop_python_ex
|
e1d3a16ade54717d6cdf1759b6eba7b27cfc974e
|
bd58ee3bf13dad3de989d5fd92e503d5ff949dd9
|
refs/heads/master
| 2020-09-22T08:35:19.847656
| 2019-11-13T02:47:06
| 2019-11-13T02:47:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
import bs4
import requests
# pip3 install requests
# pip3 install beautifulsoup4
def get_html(url):
    """Fetch *url* and return the HTML body, raising on HTTP error codes."""
    resp = requests.get(url)
    resp.raise_for_status()
    return resp.text
# 전달받은 페이지의 제목, 별점, 리뷰를 반환 함수
def movie_review_page(page):
    """Scrape one page of Naver movie reviews.

    Returns a list of [title, point, review_text] rows.
    """
    html = get_html(
        'https://movie.naver.com/movie/point/af/list.nhn?&page='+str(page))
    soup = bs4.BeautifulSoup(html, 'html.parser')
    point_cells = soup.select('div#old_content td.point')
    title_cells = soup.select('div#old_content td.title')

    rows = []
    for title_cell, point_cell in zip(title_cells, point_cells):
        # The title cell holds "title\nreview" once the report link text
        # ("신고") and surrounding whitespace are stripped away.
        parts = title_cell.getText().strip().strip("신고").strip().split('\n')
        score = int(point_cell.getText().strip())
        rows.append([parts[0], score, parts[1]])
    return rows
# a 페이지부터 b 페이지까지 리뷰 합산해서 반환
def review_index(a, b):
    """Collect and concatenate reviews from page *a* through page *b* inclusive."""
    return [row for page in range(a, b + 1) for row in movie_review_page(page)]
# 리뷰 출력
def print_review(review, a, b):
    """Print the page-range header, then one line per review row."""
    print("%d page ~ %d page" % (a, b))
    for title, point, text in review:
        print("%s (%d) : %s" % (title, point, text))
# 원하는 영화의 평균평점을 확인
def title_avg_point(review, title):
    """Return the average rating of *title* among the review rows.

    Each row is [title, point, text].  When the title has no reviews, a
    notice is printed and None is returned — callers must check for None
    before formatting the result.
    """
    # Collect matching scores once instead of hand-rolled counters; the
    # original also shadowed the builtin sum() with a local variable.
    points = [row[1] for row in review if row[0] == title]
    if not points:
        # Same notice as before ("no reviews for this movie").
        print('<%s>영화의 리뷰가 없습니다' % (title))
        return None
    return sum(points) / len(points)
# Prompt for a space-separated page range, e.g. "1 20".
a, b = map(int, input('페이지를 띄어쓰기로 구분해 입력해주세요(ex: 1 20) :').split())
review = review_index(a, b)

# Show every review from page a through page b.
print('리뷰를 확인합니다.')
print_review(review, a, b)

# Look up the average rating for one movie title.
# NOTE(review): title_avg_point returns None when the title has no reviews,
# so "%.3f" % None raises TypeError here — guard before formatting.
title = input('\n평점을 확인하고싶은 영화를 입력해주세요 : ')
print("%.3f" % title_avg_point(review, title))
|
[
"kadragon@sasa.hs.kr"
] |
kadragon@sasa.hs.kr
|
a5c74481adc2762f626d94daf9deb43125532d4c
|
162e2588156cb2c0039c926c5c442363d9f77b00
|
/data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py
|
1601ac11a20c04fcd9a8cadea05debe08ac71228
|
[
"MIT"
] |
permissive
|
nishanthpp93/curation
|
38be687240b52decc25ffb7b655f25e9faa40e47
|
ac9f38b2f4580ae806121dd929293159132c7d2a
|
refs/heads/develop
| 2022-08-08T20:33:53.125216
| 2021-12-03T21:38:48
| 2021-12-03T21:38:48
| 155,608,471
| 1
| 0
|
MIT
| 2020-10-09T01:14:39
| 2018-10-31T18:54:34
|
Python
|
UTF-8
|
Python
| false
| false
| 6,340
|
py
|
"""
Suppress COVID EHR vaccine concepts.
Original Issues: DC-1692
"""
# Python imports
import logging
# Project imports
from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, CDM_TABLES
from utils import pipeline_logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
LOGGER = logging.getLogger(__name__)
SUPPRESSION_RULE_CONCEPT_TABLE = 'covid_vaccine_concepts'
COVID_VACCINE_CONCEPT_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS
with covid_vacc as (
SELECT *
FROM `{{project_id}}.{{dataset_id}}.concept`
WHERE (
-- done by name and vocab --
REGEXP_CONTAINS(concept_name, r'(?i)(COVID)') AND
REGEXP_CONTAINS(concept_name, r'(?i)(VAC)') AND
vocabulary_id not in ('PPI')
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(207)|(208)|(210)|(211)|(212)')
and vocabulary_id = 'CVX'
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(91300)|(91301)|(91302)|(91303)|(91304)')
and vocabulary_id = 'CPT4'
)
),
concepts_via_cr as (
select distinct c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_relationship`
on c.concept_id = concept_id_1
where concept_id_2 in (select concept_id from covid_vacc)
# and concept_id_1 not in (select concept_id from covid_vacc)
and (
relationship_id not in ('Subsumes', 'RxNorm dose form of', 'Dose form group of', 'RxNorm - SPL') OR
(relationship_id = 'RxNorm - SPL' and REGEXP_CONTAINS(concept_name, r'(?i)(COVID)'))
)
),
concepts_via_ca as (
select c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca
on c.concept_id = ca.descendant_concept_id
where ca.ancestor_concept_id in (select concept_id from covid_vacc)
)
select distinct * from covid_vacc
union distinct
select distinct * from concepts_via_ca
union distinct
select distinct * from concepts_via_cr
""")
class CovidEHRVaccineConceptSuppression(AbstractBqLookupTableConceptSuppression
                                       ):
    """Suppress COVID EHR vaccine concepts via a sandboxed lookup table."""

    def __init__(self,
                 project_id,
                 dataset_id,
                 sandbox_dataset_id,
                 table_namer=None):
        """
        Initialize the class with proper information.

        Set the issue numbers, description and affected datasets. As other tickets may affect
        this SQL, append them to the list of Jira Issues.
        DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
        """
        desc = "Suppress COVID EHR vaccine concepts."
        super().__init__(
            issue_numbers=['DC1692'],
            description=desc,
            affected_datasets=[cdr_consts.REGISTERED_TIER_DEID],
            affected_tables=CDM_TABLES,
            project_id=project_id,
            dataset_id=dataset_id,
            sandbox_dataset_id=sandbox_dataset_id,
            concept_suppression_lookup_table=SUPPRESSION_RULE_CONCEPT_TABLE,
            table_namer=table_namer)

    def create_suppression_lookup_table(self, client):
        # Materialize the COVID-vaccine concept lookup table in the sandbox
        # dataset by rendering and executing the suppression query.
        concept_suppression_lookup_query = COVID_VACCINE_CONCEPT_QUERY.render(
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            sandbox_id=self.sandbox_dataset_id,
            concept_suppression_lookup_table=self.
            concept_suppression_lookup_table)
        query_job = client.query(concept_suppression_lookup_query)
        result = query_job.result()

        # Surface BigQuery job errors explicitly instead of failing silently.
        if hasattr(result, 'errors') and result.errors:
            LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
            raise GoogleCloudError(
                f"Error running job {result.job_id}: {result.errors}")

    def validate_rule(self, client, *args, **keyword_args):
        """
        Validates the cleaning rule which deletes or updates the data from the tables

        Method to run validation on cleaning rules that will be updating the values.
        For example:
        if your class updates all the datetime fields you should be implementing the
        validation that checks if the date time values that needs to be updated no
        longer exists in the table.

        if your class deletes a subset of rows in the tables you should be implementing
        the validation that checks if the count of final final row counts + deleted rows
        should equals to initial row counts of the affected tables.

        Raises RunTimeError if the validation fails.
        """
        # Intentionally unimplemented placeholder inherited from the template.
        raise NotImplementedError("Please fix me.")

    def setup_validation(self, client, *args, **keyword_args):
        """
        Run required steps for validation setup

        Method to run to setup validation on cleaning rules that will be updating or deleting the values.
        For example:
        if your class updates all the datetime fields you should be implementing the
        logic to get the initial list of values which adhere to a condition we are looking for.

        if your class deletes a subset of rows in the tables you should be implementing
        the logic to get the row counts of the tables prior to applying cleaning rule
        """
        # Intentionally unimplemented placeholder inherited from the template.
        raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
|
[
"noreply@github.com"
] |
nishanthpp93.noreply@github.com
|
a3da500227494dc20d9023e0ec3170314752b6aa
|
6b2c9c056c2fc7a9c622355e9a00da00c22d626a
|
/venv/Lib/site-packages/pip/_internal/locations.py
|
6bfbe43dc4ebb7de0c4a0bda1e64c2459cba740c
|
[] |
no_license
|
LeoM666/GameKlanRedLion
|
0a42f06add1ff396d375c2ace63d7112f41281f6
|
5cb22b8326bffcaf351180497b7c47b43e316621
|
refs/heads/master
| 2020-06-11T20:21:02.154926
| 2019-08-03T07:45:33
| 2019-08-03T07:45:33
| 194,073,919
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,934
|
py
|
"""Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import platform
import site
import sys
import sysconfig
from distutils import sysconfig as distutils_sysconfig
from distutils.command.install import SCHEME_KEYS # type: ignore
from pip._internal.utils import appdirs
from pip._internal.utils.compat import WINDOWS, expanduser
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Union, Dict, List, Optional
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
    # type: (str) -> None
    """
    Write the pip delete marker file into this directory.
    """
    marker_path = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
    with open(marker_path, 'w') as fp:
        fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
    # type: () -> bool
    """Detect whether the interpreter runs inside a virtual environment.

    Old-style virtualenv sets ``sys.real_prefix``; PEP 405 venvs make
    ``sys.prefix`` differ from ``sys.base_prefix``.
    """
    return (
        hasattr(sys, 'real_prefix')
        or sys.prefix != getattr(sys, "base_prefix", sys.prefix)
    )
def virtualenv_no_global():
    # type: () -> bool
    """Return True when in a venv that excludes system site-packages.

    Mirrors virtualenv's own logic: the marker file
    ``no-global-site-packages.txt`` is placed next to the ``site`` module.
    """
    site_dir = os.path.dirname(os.path.abspath(site.__file__))
    marker = os.path.join(site_dir, 'no-global-site-packages.txt')
    return bool(running_under_virtualenv() and os.path.isfile(marker))
# Decide where pip checks out package sources ("src" directory): inside the
# virtualenv if there is one, otherwise under the current working directory.
if running_under_virtualenv():
    src_prefix = os.path.join(sys.prefix, 'src')
else:
    # FIXME: keep src in cwd for now (it is not a temporary folder)
    try:
        src_prefix = os.path.join(os.getcwd(), 'src')
    except OSError:
        # In case the current working directory has been renamed or deleted
        sys.exit(
            "The folder you are executing pip from can no longer be found."
        )
# under macOS + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)

# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_path("purelib")  # type: Optional[str]

# This is because of a bug in PyPy's sysconfig module, see
# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths
# for more information.
if platform.python_implementation().lower() == "pypy":
    site_packages = distutils_sysconfig.get_python_lib()
try:
    # Use getusersitepackages if this is present, as it ensures that the
    # value is initialised properly.
    user_site = site.getusersitepackages()
except AttributeError:
    user_site = site.USER_SITE
user_dir = expanduser('~')
# Platform-specific locations: scripts directory ("Scripts" on Windows,
# "bin" elsewhere) and the legacy per-user configuration file.
if WINDOWS:
    bin_py = os.path.join(sys.prefix, 'Scripts')
    bin_user = os.path.join(user_site, 'Scripts')
    # buildout uses 'bin' on Windows too?
    if not os.path.exists(bin_py):
        bin_py = os.path.join(sys.prefix, 'bin')
        bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.ini'
    legacy_storage_dir = os.path.join(user_dir, 'pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
else:
    bin_py = os.path.join(sys.prefix, 'bin')
    bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.conf'
    legacy_storage_dir = os.path.join(user_dir, '.pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
# BUG FIX: the comparison constant must be '/System/Library/' (exactly 16
# characters, matching sys.prefix[:16]); the previous '/System/MainHall/'
# is 17 characters long, so the slice could never equal it and this branch
# was unreachable.
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
    bin_py = '/usr/local/bin'
# Configuration file locations: site-wide, per-interpreter, and the modern
# appdirs-based per-user file.
global_config_files = [
    os.path.join(path, config_basename)
    for path in appdirs.site_config_dirs('pip')
]
site_config_file = os.path.join(sys.prefix, config_basename)
new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename)
def distutils_scheme(dist_name, user=False, home=None, root=None,
                     isolated=False, prefix=None):
    # type:(str, bool, str, str, bool, str) -> dict
    """
    Return a distutils install scheme.

    Builds a throwaway ``Distribution`` for *dist_name*, lets distutils'
    ``install`` command resolve its options (reading config files unless
    *isolated*), and collects the resulting ``install_*`` paths keyed by
    ``SCHEME_KEYS``.

    :param dist_name: name of the dummy distribution (affects the
        per-distribution ``headers`` path under a virtualenv).
    :param user: install into the per-user site (mutually exclusive with
        *prefix*, enforced by assertion).
    :param home: distutils ``--home`` directory, if any.
    :param root: path prepended to the resulting paths (``--root``).
    :param isolated: if True, pass ``--no-user-cfg`` so user config files
        are ignored.
    :param prefix: alternate installation prefix (``--prefix``).
    """
    from distutils.dist import Distribution
    scheme = {}
    if isolated:
        extra_dist_args = {"script_args": ["--no-user-cfg"]}
    else:
        extra_dist_args = {}
    dist_args = {'name': dist_name}  # type: Dict[str, Union[str, List[str]]]
    dist_args.update(extra_dist_args)
    d = Distribution(dist_args)
    # Ignoring, typeshed issue reported python/typeshed/issues/2567
    d.parse_config_files()
    # NOTE: Ignoring type since mypy can't find attributes on 'Command'
    i = d.get_command_obj('install', create=True)  # type: Any
    assert i is not None
    # NOTE: setting user or home has the side-effect of creating the home dir
    # or user base for installations during finalize_options()
    # ideally, we'd prefer a scheme class that has no side-effects.
    assert not (user and prefix), "user={} prefix={}".format(user, prefix)
    i.user = user or i.user
    if user:
        i.prefix = ""
    i.prefix = prefix or i.prefix
    i.home = home or i.home
    i.root = root or i.root
    i.finalize_options()
    for key in SCHEME_KEYS:
        scheme[key] = getattr(i, 'install_' + key)
    # install_lib specified in setup.cfg should install *everything*
    # into there (i.e. it takes precedence over both purelib and
    # platlib). Note, i.install_lib is *always* set after
    # finalize_options(); we only want to override here if the user
    # has explicitly requested it hence going back to the config
    # Ignoring, typeshed issue reported python/typeshed/issues/2567
    if 'install_lib' in d.get_option_dict('install'):  # type: ignore
        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
    if running_under_virtualenv():
        # Virtualenvs get a per-distribution headers directory under the env.
        scheme['headers'] = os.path.join(
            sys.prefix,
            'include',
            'site',
            'python' + sys.version[:3],
            dist_name,
        )
        if root is not None:
            # Re-root the headers path, stripping any drive letter first.
            path_no_drive = os.path.splitdrive(
                os.path.abspath(scheme["headers"]))[1]
            scheme["headers"] = os.path.join(
                root,
                path_no_drive[1:],
            )
    return scheme
|
[
"leonboiko061@gmail.com"
] |
leonboiko061@gmail.com
|
4cc0e90527df6b74090fa2055c2f1b2087e39b1c
|
45da2549e8943738618ad1b774abeaa13ed463b0
|
/cian_parser/management/commands/urls_parser_cian.py
|
f057e37c493dde6d53c5645a5f1fde7ad115c543
|
[] |
no_license
|
ShashkinRoman/cian_parser
|
02ea484457123915e6c7d3def90f5bc76ae5893b
|
8a4bb0b8bb2cf5fd1a059a568bd862bdeafa8bc1
|
refs/heads/master
| 2023-02-05T09:14:27.748810
| 2020-12-23T11:07:50
| 2020-12-23T11:07:50
| 291,051,494
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from django.core.management.base import BaseCommand
from cian_parser.urls_parser import main as urls_main
class Command(BaseCommand):
    """Django management command that runs the CIAN urls parser.

    Presumably invoked as ``python manage.py urls_parser_cian`` (command
    name comes from the module filename — TODO confirm).
    """
    # Short description shown by `manage.py help`.
    help = 'urls parser'

    def handle(self, *args, **options):
        """Entry point: delegate all work to the urls parser's main()."""
        urls_main()
|
[
"romanshashkin@mail.ru"
] |
romanshashkin@mail.ru
|
c65c2414f43a95a3c537c9d9a57f181522594a14
|
236ed63fc380b10e43fd326e3f17f1ddc8f28b4e
|
/apps/goods/serializer.py
|
7bd4b70073afa4e2aaea35e22f6fd6812815ba2e
|
[] |
no_license
|
pylarva/restFrameworkShop
|
53585caab80b82f82f6d693292ccf4fa8bf33810
|
ddc88dc0ebbdd50927f9b4e6f1d8e4a65239cddb
|
refs/heads/master
| 2020-03-24T23:22:13.780622
| 2018-08-01T10:29:35
| 2018-08-01T10:29:35
| 143,130,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
from rest_framework import serializers
from .models import Goods, GoodsCategory
# 1) The verbose way: serialize the data field by field
# class GoodsSerializer(serializers.Serializer):
#     name = serializers.CharField(required=True, max_length=108)
#     click_num = serializers.IntegerField(default=0)
#     goods_front_image = serializers.ImageField()
#
#     def create(self, validated_data):
#         """
#         Create a database record from the data received from the frontend.
#         """
#         return Goods.objects.create(**validated_data)
# 2) Simplified approach using ModelSerializer
# Nested three-level goods category menu
class CategorySerializer3(serializers.ModelSerializer):
    """Third-level (leaf) goods-category serializer; exposes all fields."""
    class Meta:
        model = GoodsCategory
        fields = "__all__"
class CategorySerializer2(serializers.ModelSerializer):
    """Second-level goods-category serializer nesting its children."""
    # Reverse relation to third-level categories; many=True because one
    # category can have several children.
    sub_cat = CategorySerializer3(many=True)

    class Meta:
        model = GoodsCategory
        fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
    """Top-level goods-category serializer nesting second-level entries."""
    # Nest second-level categories inside the first level; many=True means
    # there can be more than one child.
    sub_cat = CategorySerializer2(many=True)

    class Meta:
        model = GoodsCategory
        fields = "__all__"
class GoodsSerializer(serializers.ModelSerializer):
    """Goods serializer embedding the full nested category tree."""
    category = CategorySerializer()

    class Meta:
        model = Goods
        # fields = ('name', 'click_num', 'code', 'linenos', 'language', 'style')
        fields = "__all__"
|
[
"lichengbing9027@gamil.com"
] |
lichengbing9027@gamil.com
|
3ade350b48eac09ac2875ce074001fd47ea18ca4
|
1ab7b3f2aa63de8488ce7c466a67d367771aa1f2
|
/Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/pandas/tests/groupby/test_bin_groupby.py
|
f20eed4575e91ce541b6e4f2835eacbe462fdb96
|
[
"MIT"
] |
permissive
|
icl-rocketry/Avionics
|
9d39aeb11aba11115826fd73357b415026a7adad
|
95b7a061eabd6f2b607fba79e007186030f02720
|
refs/heads/master
| 2022-07-30T07:54:10.642930
| 2022-07-10T12:19:10
| 2022-07-10T12:19:10
| 216,184,670
| 9
| 1
|
MIT
| 2022-06-27T10:17:06
| 2019-10-19T09:57:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,083
|
py
|
import numpy as np
import pytest
from pandas._libs import groupby, lib, reduction as libreduction
from pandas.core.dtypes.common import ensure_int64
import pandas as pd
from pandas import Series, isna
import pandas._testing as tm
def test_series_grouper():
    # Group 10 random values: the first three carry label -1 (excluded),
    # the rest split into group 0 (3 values) and group 1 (4 values).
    obj = Series(np.random.randn(10))
    dummy = obj.iloc[:0]
    labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
    grouper = libreduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
    result, counts = grouper.get_result()
    # Per-group means must match slicing the series directly.
    expected = np.array([obj[3:6].mean(), obj[6:].mean()])
    tm.assert_almost_equal(result, expected)
    exp_counts = np.array([3, 4], dtype=np.int64)
    tm.assert_almost_equal(counts, exp_counts)
def test_series_grouper_requires_nonempty_raises():
    # GH#29500
    # Constructing SeriesGrouper with an empty series must fail fast with
    # an explicit ValueError rather than misbehave later.
    obj = Series(np.random.randn(10))
    dummy = obj.iloc[:0]
    labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
    with pytest.raises(ValueError, match="SeriesGrouper requires non-empty `series`"):
        libreduction.SeriesGrouper(dummy, np.mean, labels, 2, dummy)
def test_series_bin_grouper():
    # Bin edges [3, 6] partition the 10 values into slices [:3], [3:6], [6:].
    obj = Series(np.random.randn(10))
    dummy = obj[:0]
    bins = np.array([3, 6])
    grouper = libreduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
    result, counts = grouper.get_result()
    # Per-bin means must match slicing the series directly.
    expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
    tm.assert_almost_equal(result, expected)
    exp_counts = np.array([3, 3, 4], dtype=np.int64)
    tm.assert_almost_equal(counts, exp_counts)
def assert_block_lengths(x):
assert len(x) == len(x._mgr.blocks[0].mgr_locs)
return 0
def cumsum_max(x):
    """Evaluate the maximum of the running sum for its side effects only;
    always returns 0 so the aggregation result is deterministic."""
    running = x.cumsum()
    running.max()
    return 0
@pytest.mark.parametrize("func", [cumsum_max, assert_block_lengths])
def test_mgr_locs_updated(func):
# https://github.com/pandas-dev/pandas/issues/31802
# Some operations may require creating new blocks, which requires
# valid mgr_locs
df = pd.DataFrame({"A": ["a", "a", "a"], "B": ["a", "b", "b"], "C": [1, 1, 1]})
result = df.groupby(["A", "B"]).agg(func)
expected = pd.DataFrame(
{"C": [0, 0]},
index=pd.MultiIndex.from_product([["a"], ["a", "b"]], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"binner,closed,expected",
[
(
np.array([0, 3, 6, 9], dtype=np.int64),
"left",
np.array([2, 5, 6], dtype=np.int64),
),
(
np.array([0, 3, 6, 9], dtype=np.int64),
"right",
np.array([3, 6, 6], dtype=np.int64),
),
(np.array([0, 3, 6], dtype=np.int64), "left", np.array([2, 5], dtype=np.int64)),
(
np.array([0, 3, 6], dtype=np.int64),
"right",
np.array([3, 6], dtype=np.int64),
),
],
)
def test_generate_bins(binner, closed, expected):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
result = lib.generate_bins_dt64(values, binner, closed=closed)
tm.assert_numpy_array_equal(result, expected)
def test_group_ohlc():
    # Exercise the group_ohlc_* kernels for both float dtypes.
    def _check(dtype):
        obj = np.array(np.random.randn(20), dtype=dtype)
        # Three groups of sizes 6, 6 and 8 (cumulative edges 6, 12, 20).
        bins = np.array([6, 12, 20])
        out = np.zeros((3, 4), dtype)
        counts = np.zeros(len(out), dtype=np.int64)
        labels = ensure_int64(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
        func = getattr(groupby, f"group_ohlc_{dtype}")
        func(out, counts, obj[:, None], labels)
        def _ohlc(group):
            # Reference open/high/low/close for one group.
            if isna(group).all():
                return np.repeat(np.nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]
        expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
        tm.assert_almost_equal(out, expected)
        tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
        # An all-NaN group must produce all-NaN OHLC values.
        obj[:6] = np.nan
        func(out, counts, obj[:, None], labels)
        expected[0] = np.nan
        tm.assert_almost_equal(out, expected)
    _check("float32")
    _check("float64")
class TestMoments:
    # Empty placeholder; no moment tests are defined in this module.
    # NOTE(review): presumably kept for test collection/compatibility —
    # confirm before removing.
    pass
|
[
"kd619@ic.ac.uk"
] |
kd619@ic.ac.uk
|
c3e17c5290ac282d69fd29259472c75ac146da2c
|
0ddcfcbfc3faa81c79e320c34c35a972dab86498
|
/puzzles/minimum_score_of_a_path_between_two_cities.py
|
8914e671e9bcec78b594033d27ea3e966a60aca7
|
[] |
no_license
|
IvanWoo/coding-interview-questions
|
3311da45895ac4f3c394b22530079c79a9215a1c
|
1312305b199b65a11804a000432ebe28d1fba87e
|
refs/heads/master
| 2023-08-09T19:46:28.278111
| 2023-06-21T01:47:07
| 2023-06-21T01:47:07
| 135,307,912
| 0
| 0
| null | 2023-07-20T12:14:38
| 2018-05-29T14:24:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,899
|
py
|
# https://leetcode.com/problems/minimum-score-of-a-path-between-two-cities/description/
"""
You are given a positive integer n representing n cities numbered from 1 to n. You are also given a 2D array roads where roads[i] = [ai, bi, distancei] indicates that there is a bidirectional road between cities ai and bi with a distance equal to distancei. The cities graph is not necessarily connected.
The score of a path between two cities is defined as the minimum distance of a road in this path.
Return the minimum possible score of a path between cities 1 and n.
Note:
A path is a sequence of roads between two cities.
It is allowed for a path to contain the same road multiple times, and you can visit cities 1 and n multiple times along the path.
The test cases are generated such that there is at least one path between 1 and n.
Example 1:
Input: n = 4, roads = [[1,2,9],[2,3,6],[2,4,5],[1,4,7]]
Output: 5
Explanation: The path from city 1 to 4 with the minimum score is: 1 -> 2 -> 4. The score of this path is min(9,5) = 5.
It can be shown that no other path has less score.
Example 2:
Input: n = 4, roads = [[1,2,2],[1,3,4],[3,4,7]]
Output: 2
Explanation: The path from city 1 to 4 with the minimum score is: 1 -> 2 -> 1 -> 3 -> 4. The score of this path is min(2,2,4,7) = 2.
Constraints:
2 <= n <= 105
1 <= roads.length <= 105
roads[i].length == 3
1 <= ai, bi <= n
ai != bi
1 <= distancei <= 104
There are no repeated edges.
There is at least one path between 1 and n.
"""
from math import inf
from puzzles.union_find import UF
def min_score(n: int, roads: list[list[int]]) -> int:
    """Return the minimum road distance within city 1's connected component.

    Union-find merges cities sharing a road while tracking, per city, the
    cheapest road touching it; the answer is the cheapest road touching any
    city connected to city 1 (city 1 itself is always in its own component,
    so the final min() is never empty).
    """
    cheapest = [inf] * (n + 1)
    dsu = UF(n + 1)
    for a, b, dist in roads:
        dsu.union(a, b)
        if dist < cheapest[a]:
            cheapest[a] = dist
        if dist < cheapest[b]:
            cheapest[b] = dist
    return min(cheapest[city] for city in range(1, n + 1) if dsu.connected(city, 1))
|
[
"tyivanwu@gmail.com"
] |
tyivanwu@gmail.com
|
1b7f698663463ef8f33f245786a62d037968f89d
|
c8847d4117204f1d26ad47488152234a64aefb0d
|
/hypergan/samplers/static_batch_sampler.py
|
81931e59ada7b5a027cd5459aa7b45db299cc08f
|
[
"MIT"
] |
permissive
|
Solertis/HyperGAN
|
851f27aa9ba2ef89b6d50f86987c6746c831502d
|
1aceed20c9d9f67de8e3b290ee84f376d64228f0
|
refs/heads/master
| 2021-01-20T14:01:52.353117
| 2017-02-11T09:48:22
| 2017-02-11T09:48:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
from hypergan.util.ops import *
from hypergan.util.globals import *
from hypergan.samplers.common import *
#mask_noise = None
# Module-level cache of the latent batch (z) and label batch (y); populated
# on the first call to sample() and reused afterwards so the grid is "static".
z = None
y = None
def sample(sample_file, sess, config):
    """Generate a fixed 4x8 grid of generator samples and save it.

    On the first call the latent batch (z) and labels (y) are cached in
    module globals; later calls reuse them, so the grid's inputs stay fixed
    while the generator's weights change during training.

    Returns a one-element list describing the saved image file.
    """
    global z, y
    generator = get_tensor("g")[0]
    y_t = get_tensor("y")
    z_t = get_tensor("z")
    # BUG FIX: removed dead `x = np.linspace(0, 1, 4)` — the value was never
    # used and was immediately shadowed by the comprehension variable below.
    if z is None:
        # Cache one batch of latent vectors and labels for reuse.
        z = sess.run(z_t)
        y = sess.run(y_t)
    g = tf.get_default_graph()
    with g.as_default():
        # Fixed seed so any stochastic ops in the graph are repeatable.
        tf.set_random_seed(1)
        sample = sess.run(generator, feed_dict={z_t: z, y_t: y})
    # Arrange the first 32 samples as 4 rows of 8 images each.
    stacks = [np.hstack(sample[row * 8:row * 8 + 8]) for row in range(4)]
    plot(config, np.vstack(stacks), sample_file)
    return [{'image': sample_file, 'label': 'grid'}]
|
[
"mikkel@255bits.com"
] |
mikkel@255bits.com
|
c8d9704e46969f972fc3a2f5b971e4f09044ca2b
|
a904358b873d4881a9e61555bfc7a97169c696a8
|
/src/djnydus/db/shards/routers.py
|
c8d5539eb60791895d7eacc5c92f2c1671386aed
|
[
"Apache-2.0"
] |
permissive
|
numan/nydus-django
|
945e91df73af34941a2a281c2b5e2a706f87a3b5
|
b652ea28d7f801f453a64e4ec78899e2acfc7606
|
refs/heads/master
| 2021-01-18T08:19:06.320484
| 2013-03-26T01:49:42
| 2013-03-26T01:49:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
"""
djnydus.shards.router
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.