blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34dabfaf114c859834bf8f2b8df7583d891745bb | 6c1933a3694d6a5befe782e95a4034575fe96a63 | /strings/count_Ts.py | a93d2d839f1b54e0ff24392e5af95bb469d321c3 | [] | no_license | katrek/learningbook | 20de4fe0843ef33a18018fa4169172083ab420e5 | a4d882f522a69617c980f3bc98464a4042d8d23d | refs/heads/master | 2020-04-22T11:50:49.427813 | 2019-03-04T08:15:01 | 2019-03-04T08:15:01 | 170,354,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # This program counts the number of times
# the letter T (uppercase or lowercase)
# appears in a string.
def main():
# Create a variable to use to hold the count
# The variable must start with 0.
count = 0
# Get a string from the user
my_string = input('Enter a sentence: ')
# Count the Ts
for ch in my_string:
if ch == 'T' or ch == 't':
count += 1
# Print the result
print('The letter T appears', count, 'times.')
main()
| [
"akatrek@gmail.com"
] | akatrek@gmail.com |
b39240fd3ecb43034a6fd9481b23da0a188f5b9b | eedea7d2f6ad0f497f1469ab78ea00c3c33bd57a | /hamon_shu/segments/segment_02/.handlers.py | 05335f68abbbc85f92d8e55195a7be4c67eb9250 | [] | no_license | GregoryREvans/hamon_shu | 750927aec941f60bf0b90ee2196a886c19c611ad | 8081ee57fce8db07c3492e67e7a634e3b08f3bb3 | refs/heads/master | 2022-02-27T06:22:44.449635 | 2022-02-10T13:48:23 | 2022-02-10T13:48:23 | 144,753,533 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,525 | py | import abjad
handler_to_value = dict(
[
(
'violin_1_pitch_handler_two',
dict(
[
('pitch_count', 60),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_1_pitch_handler_one',
dict(
[
('pitch_count', 54),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_1_pitch_handler_three',
dict(
[
('pitch_count', 33),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_1_pitch_handler_four',
dict(
[
('pitch_count', 35),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_two',
dict(
[
('pitch_count', 70),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_one',
dict(
[
('pitch_count', 27),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_three',
dict(
[
('pitch_count', 44),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_four',
dict(
[
('pitch_count', 15),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_two',
dict(
[
('pitch_count', 82),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_one',
dict(
[
('pitch_count', 39),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_three',
dict(
[
('pitch_count', 50),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_four',
dict(
[
('pitch_count', 39),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_two',
dict(
[
('pitch_count', 58),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_one',
dict(
[
('pitch_count', 32),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_three',
dict(
[
('pitch_count', 36),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_four',
dict(
[
('pitch_count', 14),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'dynamic_handler_one',
dict(
[
('count_1', 31),
('count_2', 11),
('count_3', 19),
('count_4', 12),
('count_5', 31),
]
),
),
(
'dynamic_handler_two',
dict(
[
('count_1', 7),
('count_2', 2),
('count_3', 4),
('count_4', 2),
('count_5', 7),
]
),
),
(
'articulation_handler_three',
dict(
[
('count_1', 70),
('count_2', 37),
('count_3', -1),
]
),
),
(
'articulation_handler_two',
dict(
[
('count_1', -1),
('count_2', 9),
('count_3', 4),
]
),
),
(
'notehead handler one',
dict(
[
('count', 11),
('head_vector_count', 11),
('transition_vector_count', 11),
]
),
),
(
'notehead handler two',
dict(
[
('count', 17),
('head_vector_count', 98),
('transition_vector_count', 98),
]
),
),
(
'notehead handler three',
dict(
[
('count', 38),
('head_vector_count', 38),
('transition_vector_count', 38),
]
),
),
]
) | [
"gregoryrowlandevans@gmail.com"
] | gregoryrowlandevans@gmail.com |
326a0471da1c7533d4d4d479008aa4ac0104fbf6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_168/78.py | e9f599963fb626cb7e5361db569b6480d4061af1 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | def shoot(x, n, m, i, j, di, dj):
while 0 <= i and i < n and 0 <= j and j < m:
i += di
j += dj
if 0 <= i and i < n and 0 <= j and j < m and x[i][j] != '.':
return True
return False
def fail(x, n, m, i, j, di, dj):
if shoot(x, n, m, i, j, di, dj):
return 0
if (shoot(x, n, m, i, j, 0, 1)
or shoot(x, n, m, i, j, 0, -1)
or shoot(x, n, m, i, j, 1, 0)
or shoot(x, n, m, i, j, -1, 0)):
return 1
else:
return -1
ttt = int(raw_input())
for tt in xrange(1, ttt+1):
x = []
ans = 0
n, m = map(int, raw_input().strip().split())
for i in xrange(n):
x.append(raw_input().strip())
for i in xrange(n):
for j in xrange(m):
if x[i][j] == '>':
p = fail(x, n, m, i, j, 0, 1)
if p == -1:
ans = -1
elif ans != -1:
ans += p
elif x[i][j] == '<':
p = fail(x, n, m, i, j, 0, -1)
if p == -1:
ans = -1
elif ans != -1:
ans += p
elif x[i][j] == 'v':
p = fail(x, n, m, i, j, 1, 0)
if p == -1:
ans = -1
elif ans != -1:
ans += p
elif x[i][j] == '^':
p = fail(x, n, m, i, j, -1, 0)
if p == -1:
ans = -1
elif ans != -1:
ans += p
if ans == -1:
print 'Case #%d: IMPOSSIBLE' % tt
else:
print 'Case #%d: %d' % (tt, ans) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8fbb76879f325945c09353ed6fc34d423c236185 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_freedmen.py | 9fcd092dafa3654c67aa821a36c2c005b9e936c5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py |
from xai.brain.wordbase.nouns._freedman import _FREEDMAN
#calss header
class _FREEDMEN(_FREEDMAN, ):
def __init__(self,):
_FREEDMAN.__init__(self)
self.name = "FREEDMEN"
self.specie = 'nouns'
self.basic = "freedman"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6e745a7e9688972239426bb82b10885ebedb14cf | 964f2882117ff656d7a2757c233c6dd88226d975 | /packages/models-library/tests/test_service_settings_labels.py | a4f98e18cf7bdfc0609d359377e6cebd15bfad01 | [
"MIT"
] | permissive | ignapas/osparc-simcore | a002dd47d7689af9c1c650eea33e31add2b182c1 | cb62e56b194265a907f260f3071c55a65f569823 | refs/heads/master | 2023-01-22T08:55:32.580775 | 2022-12-09T15:57:36 | 2022-12-09T15:57:36 | 170,852,656 | 0 | 0 | MIT | 2023-01-09T05:03:04 | 2019-02-15T11:12:34 | Python | UTF-8 | Python | false | false | 4,991 | py | # pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
from collections import namedtuple
from copy import deepcopy
from pprint import pformat
from typing import Any
import pytest
from models_library.service_settings_labels import (
PathMappingsLabel,
SimcoreServiceLabels,
SimcoreServiceSettingLabelEntry,
SimcoreServiceSettingsLabel,
)
from pydantic import BaseModel, ValidationError
SimcoreServiceExample = namedtuple(
"SimcoreServiceExample", "example, items, uses_dynamic_sidecar, id"
)
SIMCORE_SERVICE_EXAMPLES = [
SimcoreServiceExample(
example=SimcoreServiceLabels.Config.schema_extra["examples"][0],
items=1,
uses_dynamic_sidecar=False,
id="legacy",
),
SimcoreServiceExample(
example=SimcoreServiceLabels.Config.schema_extra["examples"][1],
items=3,
uses_dynamic_sidecar=True,
id="dynamic-service",
),
SimcoreServiceExample(
example=SimcoreServiceLabels.Config.schema_extra["examples"][2],
items=5,
uses_dynamic_sidecar=True,
id="dynamic-service-with-compose-spec",
),
]
@pytest.mark.parametrize(
"example, items, uses_dynamic_sidecar",
[(x.example, x.items, x.uses_dynamic_sidecar) for x in SIMCORE_SERVICE_EXAMPLES],
ids=[x.id for x in SIMCORE_SERVICE_EXAMPLES],
)
def test_simcore_service_labels(
example: dict, items: int, uses_dynamic_sidecar: bool
) -> None:
simcore_service_labels = SimcoreServiceLabels.parse_obj(example)
assert simcore_service_labels
assert len(simcore_service_labels.dict(exclude_unset=True)) == items
assert simcore_service_labels.needs_dynamic_sidecar == uses_dynamic_sidecar
def test_service_settings() -> None:
simcore_settings_settings_label = SimcoreServiceSettingsLabel.parse_obj(
SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"]
)
assert simcore_settings_settings_label
assert len(simcore_settings_settings_label) == len(
SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"]
)
assert simcore_settings_settings_label[0]
# ensure private attribute assignment
for service_setting in simcore_settings_settings_label:
# pylint: disable=protected-access
service_setting._destination_containers = ["random_value1", "random_value2"]
@pytest.mark.parametrize(
"model_cls",
(
SimcoreServiceSettingLabelEntry,
SimcoreServiceSettingsLabel,
SimcoreServiceLabels,
),
)
def test_service_settings_model_examples(
model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]]
) -> None:
for name, example in model_cls_examples.items():
print(name, ":", pformat(example))
model_instance = model_cls(**example)
assert model_instance, f"Failed with {name}"
@pytest.mark.parametrize(
"model_cls",
(SimcoreServiceLabels,),
)
def test_correctly_detect_dynamic_sidecar_boot(
model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]]
) -> None:
for name, example in model_cls_examples.items():
print(name, ":", pformat(example))
model_instance = model_cls(**example)
assert model_instance.needs_dynamic_sidecar == (
"simcore.service.paths-mapping" in example
)
def test_raises_error_if_http_entrypoint_is_missing() -> None:
simcore_service_labels: dict[str, Any] = deepcopy(
SimcoreServiceLabels.Config.schema_extra["examples"][2]
)
del simcore_service_labels["simcore.service.container-http-entrypoint"]
with pytest.raises(ValueError):
SimcoreServiceLabels(**simcore_service_labels)
def test_path_mappings_none_state_paths() -> None:
sample_data = deepcopy(PathMappingsLabel.Config.schema_extra["example"])
sample_data["state_paths"] = None
with pytest.raises(ValidationError):
PathMappingsLabel(**sample_data)
def test_path_mappings_json_encoding() -> None:
example = PathMappingsLabel.Config.schema_extra["example"]
path_mappings = PathMappingsLabel.parse_obj(example)
print(path_mappings)
assert PathMappingsLabel.parse_raw(path_mappings.json()) == path_mappings
def test_simcore_services_labels_compose_spec_null_container_http_entry_provided() -> None:
sample_data = deepcopy(SimcoreServiceLabels.Config.schema_extra["examples"][2])
assert sample_data["simcore.service.container-http-entrypoint"]
sample_data["simcore.service.compose-spec"] = None
with pytest.raises(ValidationError):
SimcoreServiceLabels(**sample_data)
def test_raises_error_wrong_restart_policy() -> None:
simcore_service_labels: dict[str, Any] = deepcopy(
SimcoreServiceLabels.Config.schema_extra["examples"][2]
)
simcore_service_labels["simcore.service.restart-policy"] = "__not_a_valid_policy__"
with pytest.raises(ValueError):
SimcoreServiceLabels(**simcore_service_labels)
| [
"noreply@github.com"
] | ignapas.noreply@github.com |
600c64a6d15fa03d9177fd01005f85d4ea9f9f44 | 71660e47b7b6f704c0329b646c0a65cb8732880a | /vissl/engines/__init__.py | 0e72d0143746ee9c2dcef587dc4504031d16ca37 | [
"MIT"
] | permissive | dailing/vissl | 1a278ea0fb060ca65cbbbed90280026dcbd74668 | 7ad8ecad2ade3074912f7d8ef57debe95bfd9939 | refs/heads/master | 2023-06-03T05:45:19.329156 | 2021-06-21T21:08:11 | 2021-06-21T21:09:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from vissl.engines.engine_registry import register_engine, run_engine # noqa
from vissl.engines.extract_features import extract_main # noqa
from vissl.engines.train import train_main # noqa
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
7608ff71191692b21bb793bffbc9d84e22d7da38 | 832852c679816673f708860929a36a20ca8d3e32 | /Configurations/ggH_SF/Full2016_nAODv5/maxDNN/cuts_2j_dymvaOptim.py | 3b213c3ebfbba55e732be2303447f8e547a68e04 | [] | no_license | UniMiBAnalyses/PlotsConfigurations | c4ec7376e2757b838930dfb2615e1dc99a64e542 | 578fe518cfc608169d3418bcb63a8342d3a24390 | refs/heads/master | 2023-08-31T17:57:45.396325 | 2022-09-01T10:13:14 | 2022-09-01T10:13:14 | 172,092,793 | 0 | 13 | null | 2023-04-27T10:26:52 | 2019-02-22T15:52:44 | Python | UTF-8 | Python | false | false | 6,778 | py |
supercut = ' mll > 12 \
&& Lepton_pt[0]>20 \
&& Lepton_pt[1]>10 \
&& (abs(Lepton_pdgId[0])==13 || Lepton_pt[0]>25) \
&& (abs(Lepton_pdgId[1])==13 || Lepton_pt[1]>13) \
&& (nLepton>=2 && Alt$(Lepton_pt[2],0)<10) \
&& abs(Lepton_eta[0])<2.5 && abs(Lepton_eta[1])<2.5 \
&& ptll>30 \
&& PuppiMET_pt > 20 \
&& (hww_DYmvaDNN_2j(Entry$) > 0.8) \
&& 2jggH \
'
optim={}
##optim['dymva0p805'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.805 '
#optim['dymva0p81'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.81 '
#optim['dymva0p82'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.82 '
##optim['dymva0p825'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.825 '
optim['dymva0p83'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.83 '
optim['dymva0p835'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.835 '
optim['dymva0p84'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.84 '
optim['dymva0p845'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.845 '
##optim['dymva0p85'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.85 '
optim['dymva0p855'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.855 '
optim['dymva0p86'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.86 '
optim['dymva0p865'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.865 '
optim['dymva0p87'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.87 '
##optim['dymva0p875'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.875 '
#optim['dymva0p88'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.88 '
#optim['dymva0p885'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.885 '
#optim['dymva0p89'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.89 '
#optim['dymva0p895'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.895 '
##optim['dymva0p90'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.90 '
#optim['dymva0p905'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.905 '
#optim['dymva0p91'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.91 '
#optim['dymva0p915'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.915 '
#optim['dymva0p92'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.92 '
##optim['dymva0p925'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.925 '
#optim['dymva0p93'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.93 '
#optim['dymva0p935'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.935 '
#optim['dymva0p94'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.94 '
#optim['dymva0p945'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.945 '
##optim['dymva0p95'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.95 '
#optim['dymva0p955'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.955 '
#optim['dymva0p96'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.96 '
#optim['dymva0p965'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.965 '
#optim['dymva0p97'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.97 '
##optim['dymva0p975'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.975 '
#optim['dymva0p98'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.98 '
#optim['dymva0p985'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.985 '
#optim['dymva0p99'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.99 '
##optim['dymva0p995'] = ' && hww_DYmvaDNN_2j(Entry$) > 0.995 '
for iCut in optim:
# Higgs Signal Regions: ee/uu * 0/1 jet
cuts['hww2l2v_13TeV_2jee_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& ZVeto \
&& bVeto \
&& Higgs2jetee \
' + optim[iCut]
cuts['hww2l2v_13TeV_2jmm_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& ZVeto \
&& bVeto \
&& Higgs2jetee \
' + optim[iCut]
## Top CR: No H sel , bTag , tight DYmva
cuts['hww2l2v_13TeV_top_2jee_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& ZVeto \
&& topcr \
' + optim[iCut]
cuts['hww2l2v_13TeV_top_2jmm_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& ZVeto \
&& topcr \
' + optim[iCut]
## WW CR: No H Sel , mll>80, tight DYMva
cuts['hww2l2v_13TeV_WW_2jee_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& wwcr \
' + optim[iCut]
cuts['hww2l2v_13TeV_WW_2jmm_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& wwcr \
' + optim[iCut]
## DY Background IN with DYMVA>0.9X : Split ee/mm , No H cut !
cuts['hww2l2v_13TeV_DYin_2jee_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& bVeto \
&& fabs(91.1876 - mll) < 7.5 \
' + optim[iCut]
cuts['hww2l2v_13TeV_DYin_2jmm_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& bVeto \
&& fabs(91.1876 - mll) < 7.5 \
' + optim[iCut]
cuts['hww2l2v_13TeV_DYin_2jdf_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) \
&& bVeto \
&& fabs(91.1876 - mll) < 7.5 \
' + optim[iCut]
## DY Background IN with btag : Split ee/mm , No H cut !
# 2jet only: Negligible DY background in 2jet bTag region
cuts['hww2l2v_13TeV_DYin_btag_2jee_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& topcr \
&& fabs(91.1876 - mll) < 7.5 \
' + optim[iCut]
cuts['hww2l2v_13TeV_DYin_btag_2jmm_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& topcr \
&& fabs(91.1876 - mll) < 7.5 \
' + optim[iCut]
cuts['hww2l2v_13TeV_DYin_btag_2jdf_'+iCut] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13) \
&& topcr \
&& fabs(91.1876 - mll) < 7.5 \
' + optim[iCut]
## Loose dymva + H sel for DY Acc
cuts['hww2l2v_13TeV_2jee_HAccNum'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& ZVeto \
&& bVeto \
&& Higgs2jetee \
&& hww_DYmvaDNN_2j(Entry$) > 0.8 \
'
cuts['hww2l2v_13TeV_2jmm_HAccNum'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& ZVeto \
&& bVeto \
&& Higgs2jetee \
&& hww_DYmvaDNN_2j(Entry$) > 0.8 \
'
## DY CR for Acc Denominator
cuts['hww2l2v_13TeV_2jee_AccDen'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& ZVeto \
&& bVeto \
&& hww_DYmvaDNN_2j(Entry$) > 0.8 \
'
cuts['hww2l2v_13TeV_2jmm_AccDen'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& ZVeto \
&& bVeto \
&& hww_DYmvaDNN_2j(Entry$) > 0.8 \
'
## Loose dymva + WW sel for DY Acc
cuts['hww2l2v_13TeV_WW_2jee_WWAccNum'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*11) \
&& wwcr \
&& hww_DYmvaDNN_2j(Entry$) > 0.8 \
'
cuts['hww2l2v_13TeV_WW_2jmm_WWAccNum'] = '(Lepton_pdgId[0]*Lepton_pdgId[1] == -13*13) \
&& wwcr \
&& hww_DYmvaDNN_2j(Entry$) > 0.8 \
'
| [
"davide.di.croce@cern.ch"
] | davide.di.croce@cern.ch |
5e598527209c7bd0a4183e7c16f91b5d8d42e06c | a6296e4681fbf4a2282b3d77f7ffc3e0cf3b700d | /list_property/custom_namedlist.py | 2da60839be29c47a884a9f4bee4b2746ca25e437 | [
"MIT"
] | permissive | justengel/list_property | 331769d49268735bca9ddd5058f27bfbf8070f61 | 34a43976a0f12a84d387c3851beb1fcebc2182ce | refs/heads/master | 2020-03-23T11:09:15.219529 | 2018-07-18T20:28:58 | 2018-07-18T20:28:58 | 141,486,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,001 | py | from .custom_property import list_property
__all__ = ['NamedList', 'namedlist']
class NamedListMetaclass(type):
def __new__(typ, name, bases, attrs):
cls = super().__new__(typ, name, bases, attrs)
cls.__properties__ = [attr for attr in dir(cls) if isinstance(getattr(cls, attr), list_property)]
return cls
class NamedList(list, metaclass=NamedListMetaclass):
def __init__(self, *args, **kwargs):
if len(args) > 1:
args = [args]
super().__init__(*args)
for key, value in kwargs.items():
setattr(self, key, value)
def __iter__(self):
self.__current_index__ = 0
return self
def __next__(self):
idx = self.__current_index__
self.__current_index__ += 1
try:
return self[idx]
except:
raise StopIteration
def __getitem__(self, i):
try:
return super().__getitem__(i)
except IndexError as err:
try:
attr = self.__properties__[i]
prop = getattr(self.__class__, attr)
return prop.fget(self)
except:
pass
raise err
# def __str__(self):
# return '[{0}]'.format(', '.join((repr(i) for i in self)))
def _get_value(obj, index, default=None):
try:
return obj[index]
except:
return default
def namedlist(name, field_names, defaults=None):
if not isinstance(field_names, (list, tuple)):
field_names = field_names.split()
if isinstance(defaults, dict):
fields = {name: list_property(i, defaults.get(name, None)) for i, name in enumerate(field_names)}
elif isinstance(defaults, (list, tuple)):
fields = {name: list_property(i, _get_value(defaults, i, None)) for i, name in enumerate(field_names)}
else:
fields = {name: list_property(i, defaults) for i, name in enumerate(field_names)}
return type(name, (NamedList,), fields)
| [
"jtengel08@gmail.com"
] | jtengel08@gmail.com |
7183b395369fb2cad65cb1de32df5e2f8cb8ae5e | 9ca6ec384f99fb8be5b2fc05bee405fd22607485 | /exos/exo2.py | 26d3038a0d0d51d5fae07eab4e9540f43a73c5ac | [] | no_license | gb-0001/python-refinitiv | 39e5b6b2b264aee5a560dba00c6f26c19d6cdf65 | 06119b94f2fb73b776d816a40e1a4a451763b6c2 | refs/heads/master | 2023-04-09T13:55:19.956342 | 2021-04-21T09:01:11 | 2021-04-21T09:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | '''
*** Exo 2 ***
Ecrire un programme python qui propose à l'utilisateur
de deviner un nombre caché (exemple: 560) et
affichera en fonction de la réponse de l'utilisateur:
- "c'est plus" si le nombre saisi est inférieur au nombre caché
- "c'est moins" si le nombre saisi est supérieur au nombre caché
Tant que l'utilisateur n'a pas trouvé le nombre caché
on lui demande la saisie
'''
print("*** EXO 2: chiffre mystère à deviner --- version 2 --- ***")
#guessNumber = 560
from random import randint
guessNumber = randint(1, 20)
userNumber = int(input("Essaie de deviner mon chiffre. Saisis un chiffre : "))
while userNumber != guessNumber:
if userNumber > guessNumber:
print("C'est moins")
elif userNumber < guessNumber:
print("C'est plus")
userNumber = int(input("Essaie de deviner mon chiffre. Saisis un chiffre : "))
print("Bravo !") | [
"opusidea@gmail.com"
] | opusidea@gmail.com |
346ed74309df7fa44322893cf06faf1b47a2a228 | 054b665faf3c099efb3e24768b4dcb24e054b011 | /flask/bin/pip-2.7 | 76d9553d171453470fb8e113ec24df113c931249 | [] | no_license | linqili2006/mywebsite | 8cbec2c32e8a38db2e7d5e0e6745800dd09f239a | 4ff202d75cab59d507755f48020d02c2ec0a6eb7 | refs/heads/master | 2020-03-15T11:14:24.543491 | 2018-05-10T08:31:59 | 2018-05-10T08:31:59 | 132,116,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | 7 | #!/root/website/flask/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==1.4.1','console_scripts','pip-2.7'
__requires__ = 'pip==1.4.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('pip==1.4.1', 'console_scripts', 'pip-2.7')()
)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
4f3af2198a7225f81bb39715d75d3564e1c1bfda | 738c7fe93e7b045ab417a4258b74857c8887fa41 | /CpWebSourcePlatform/apps/blog/views.py | 97e991f3a09f4e04308f3db51af77a2629d136ea | [] | no_license | AmirHuang/CpWebSourcePlatform | da61bf5c5de2f4318f9e092cd8ae7f63d196ea35 | 82d8259164e83da4f50b0b5bc6fb8fb1d1f2a454 | refs/heads/master | 2020-04-25T03:45:07.304614 | 2019-02-25T10:48:54 | 2019-02-25T10:48:54 | 172,487,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,048 | py | from .models import BlogActicle, BlogActicleBanner, BlogCategory
from .serializers import BlogActicleSerializers, BlogActicleBannerSerializers, BlogCategorySerializers, \
BlogActiclePublicSerializers
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.pagination import PageNumberPagination
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.authentication import SessionAuthentication
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.permissions import IsAuthenticated
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from .filters import BlogActicleFilter
from utils.permissions import IsOwnerOrReadOnly
class BlogActicleResultsSetPagination(PageNumberPagination):
page_size = 1
page_size_query_param = 'page_size'
page_query_param = 'page_index'
max_page_size = 100
class BlogActicleListViewSet(CacheResponseMixin, mixins.CreateModelMixin, mixins.ListModelMixin,
mixins.RetrieveModelMixin, viewsets.GenericViewSet):
"""
list:
返回博客文章列表,过滤,排序,搜索,分页,以及根据ID返回某一篇文章
"""
# permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
# authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication,)
queryset = BlogActicle.objects.all().order_by('-add_time')
pagination_class = BlogActicleResultsSetPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter,)
# filter_class = BlogActicleFilter
search_fields = ('acticle_name', 'acticle_content')
ordering_fields = ('comment_num', 'fav_num', 'click_num', 'add_time')
# 动态设置serializer
def get_serializer_class(self):
if self.action == 'create':
return BlogActiclePublicSerializers
return BlogActicleSerializers
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
instance.click_num += 1
instance.save()
serializer = self.get_serializer(instance)
return Response(serializer.data)
class BlogActicleBannerListViewSet(CacheResponseMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
"""
list:
返回博客文章轮播图
"""
queryset = BlogActicleBanner.objects.all()
serializer_class = BlogActicleBannerSerializers
class BlogCategoryListViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
"""
list:
返回博客文章类别
"""
permission_classes = (IsAuthenticated,)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication,)
queryset = BlogCategory.objects.filter(category_type=1)
serializer_class = BlogCategorySerializers
| [
"429771087@qq.com"
] | 429771087@qq.com |
e9a9e42fa4577b9e5879c27a5e383eabc09a5db7 | 1ad4b4f46e9e3cafdf8ccb17eb7703905847fda2 | /text paper/5method override.py | 202a0d87a5b82bfc7677aeee232734e11168857e | [] | no_license | febacc103/febacc103 | 09711cd1d9e4c06bdb1631a72d86fe34e3edd13d | d5ebf3534a9ec2f3634f89c894816b22a7fbaa80 | refs/heads/master | 2023-03-29T22:25:48.073291 | 2021-04-20T08:31:44 | 2021-04-20T08:31:44 | 359,677,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | # 5. What is method overriding give an example using Books class?
#===============================================================
# Overriding is the property of a class to change
# the implementation of a method provided by one
# of its base classes. ... Method overriding is thus
# a part of the inheritance mechanism. In Python
# method overriding occurs by simply defining in
# the child class a method with the same name of a method
# in the parent class.
#=============================================================
class Book:
def m1(self,bname):
self.bname=bname
print("branch:",self.bname)
class Sbook(Book):
def m2(self,sbname):
self.sbname=sbname
print("book name:",self.sbname)
obj=Sbook()
obj.m1("science")
obj.m2("chemistry") | [
"febajohnjoicyblesson@gmail.com"
] | febajohnjoicyblesson@gmail.com |
5d3d0d452b4724f77676b09b7d81634d2c3fbf1c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Detectron/lib/utils/io.py | 3e75499df0514c48b466aaf13e03a2d4011c8495 | [
"MIT",
"Apache-2.0"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:2046fe0be4ae0121a62583ea3521a7ec5832ea9f0cfbb3135cd05f293d4bc580
size 4760
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
ef36c98ff28729a05721d5275994440c4b1b49b5 | e5cf5318900c0128140dafc26c8eab750246b21d | /scripts/data_convert/msmarco/convert_queries.py | 95f85e3dc8433a9c4ebb3d5bd0211ee6bc52dbc3 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | dzynin/FlexNeuART | 113f000bdd5da36ef2273f017e72b0de19f01cce | 741734527e6e3add6ed1de893c49517999a36688 | refs/heads/master | 2023-04-21T16:59:28.870489 | 2021-04-27T02:16:49 | 2021-04-27T02:16:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,666 | py | #!/usr/bin/env python
# Convert MSMARCO queries
import sys
import json
import argparse
import pytorch_pretrained_bert
sys.path.append('.')
from scripts.data_convert.text_proc import SpacyTextParser
from scripts.data_convert.convert_common import STOPWORD_FILE, BERT_TOK_OPT_HELP, BERT_TOK_OPT, \
    FileWrapper, read_stop_words, add_retokenized_field
from scripts.config import TEXT_BERT_TOKENIZED_NAME, \
    TEXT_FIELD_NAME, DOCID_FIELD, BERT_BASE_MODEL, \
    TEXT_RAW_FIELD_NAME, TEXT_UNLEMM_FIELD_NAME, \
    IMAP_PROC_CHUNK_QTY, REPORT_QTY, SPACY_MODEL

parser = argparse.ArgumentParser(description='Convert MSMARCO-adhoc queries.')
parser.add_argument('--input', metavar='input file', help='input file',
                    type=str, required=True)
parser.add_argument('--output', metavar='output file', help='output file',
                    type=str, required=True)
parser.add_argument('--min_query_token_qty', type=int, default=0,
                    metavar='min # of query tokens', help='ignore queries that have smaller # of tokens')
parser.add_argument('--' + BERT_TOK_OPT, action='store_true', help=BERT_TOK_OPT_HELP)

args = parser.parse_args()
print(args)
arg_vars = vars(args)

inp_file = FileWrapper(args.input)
out_file = FileWrapper(args.output, 'w')
min_query_tok_qty = args.min_query_token_qty

stop_words = read_stop_words(STOPWORD_FILE, lower_case=True)
print(stop_words)
# Lemmatizing/tokenizing parser shared by all queries.
nlp = SpacyTextParser(SPACY_MODEL, stop_words, keep_only_alpha_num=True, lower_case=True)

# BUG FIX: bert_tokenizer was previously bound only inside the option check,
# so the add_retokenized_field() call below raised a NameError whenever the
# BERT-tokenization flag was not given. Default it to None and guard the call.
bert_tokenizer = None
if arg_vars[BERT_TOK_OPT]:
    print('BERT-tokenizing input into the field: ' + TEXT_BERT_TOKENIZED_NAME)
    bert_tokenizer = pytorch_pretrained_bert.BertTokenizer.from_pretrained(BERT_BASE_MODEL)

# Input file is a TSV file: <query id>\t<query text>, one query per line.
ln = 0
for line in inp_file:
    ln += 1
    line = line.strip()
    if not line:
        continue
    fields = line.split('\t')
    if len(fields) != 2:
        print('Misformated line %d ignoring:' % ln)
        print(line.replace('\t', '<field delimiter>'))
        continue

    did, query_orig = fields

    query_lemmas, query_unlemm = nlp.proc_text(query_orig)

    # Skip queries that are too short after lemmatization.
    query_toks = query_lemmas.split()
    if len(query_toks) >= min_query_tok_qty:
        doc = {DOCID_FIELD: did,
               TEXT_FIELD_NAME: query_lemmas,
               TEXT_UNLEMM_FIELD_NAME: query_unlemm,
               TEXT_RAW_FIELD_NAME: query_orig}
        if bert_tokenizer is not None:
            add_retokenized_field(doc, TEXT_RAW_FIELD_NAME, TEXT_BERT_TOKENIZED_NAME, bert_tokenizer)
        doc_str = json.dumps(doc) + '\n'
        out_file.write(doc_str)

    if ln % REPORT_QTY == 0:
        print('Processed %d queries' % ln)

print('Processed %d queries' % ln)

inp_file.close()
out_file.close()
| [
"leo@boytsov.info"
] | leo@boytsov.info |
335d9bee562e127a1da8811a82f9a7643b8f388a | 7af92e61743bf07b1f96bff1b10ae873b6e1afea | /roadsinHackerland.py | a8cb425ff8e33c5a0544da0cd6d89f5996fd70e1 | [] | no_license | udwivedi394/graphTheory | 13c6cef37998e8804daef65b72cae2d979b3e307 | 70be616b5a6ba7bda7c311f3724125673fa35717 | refs/heads/master | 2021-05-14T09:46:38.551503 | 2018-10-02T16:23:44 | 2018-10-02T16:23:44 | 116,335,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,989 | py | import sys
class Node:
    """Disjoint-set (union-find) element: payload, rank, and parent link."""
    def __init__(self,data):
        self.data = data
        self.rank = 0
        # A fresh node is its own representative (a singleton set).
        self.parent = self
    #Return the overall Rank of current set
    def findRank(self):
        temp = self.findSet()
        return temp.rank
    #Find the main representative set of current set
    def findSet(self):
        # Walk parent links until reaching the root (a node that is its own parent).
        temp = self.parent
        while temp.parent != temp:
            temp = temp.parent
        #Path Compression, set the parent of current pointer to the representative set directly
        # (note: only this node is re-linked; intermediate nodes keep their old parents)
        self.parent = temp
        return temp
    #Print the current hierarchy of current set, bottom's up
    def printSet(self):
        # Python 2 print statement: the trailing comma keeps output on one line.
        temp = self
        while 1:
            print temp.data,
            if temp.parent == temp:
                break
            temp = temp.parent
#Make a new set
def makeSet(data):
    """Wrap *data* in a fresh singleton disjoint-set Node."""
    singleton = Node(data)
    return singleton
#Merge two sets
def union(set1,set2):
    """Union-by-rank merge of the sets containing set1 and set2.

    Ranks are read from each set's representative via findRank(); on a
    tie, set1's representative wins and its rank grows by one.
    """
    #If the rank of both the sets is same, then
    if set1.findRank() == set2.findRank():
        #Set the parent of representative of set2 to representative of set1
        set2.findSet().parent = set1.findSet()
        #Increment the overall rank of set1 by 1
        set1.findSet().rank += 1
    elif set1.findRank() > set2.findRank():
        #Set the parent of representative of set2 to representative of set1
        set2.findSet().parent = set1.findSet()
    else:
        #Set the parent of representative of set1 to representative of set2
        set1.findSet().parent = set2.findSet()
class Graph:
    """Graph over vertices 1..vertices, kept both as a flat edge list and
    as an adjacency map."""

    def __init__(self, vertices):
        self.vertices = vertices
        self.graph = []  # flat edge list: [u, v, weight]
        self.graphmap = {v: [] for v in range(1, vertices + 1)}  # adjacency lists
        self.overall = 0  # scratch accumulator used by callers

    def addEdge(self, u, v, weight):
        """Record an edge in the flat edge list."""
        self.graph.append([u, v, weight])

    def addEdgemap(self, u, v, data):
        """Record the edge in the adjacency map, once per endpoint."""
        for a, b in ((u, v), (v, u)):
            self.graphmap[a].append([b, data])

    def sortedEdges(self):
        """Sort the flat edge list in place by ascending weight."""
        self.graph.sort(key=lambda edge: edge[2])
def mst02(n, graph):
    """Kruskal's MST: fill graph.graphmap with the tree edges.

    Builds one disjoint-set node per vertex, scans edges by ascending
    weight, and keeps every edge that joins two different components.
    """
    sets = {v: makeSet(v) for v in range(1, n + 1)}
    graph.sortedEdges()
    for u, v, w in graph.graph:
        if sets[u].findSet() != sets[v].findSet():
            graph.addEdgemap(u, v, w)
            union(sets[u], sets[v])
"""
f1 = open("/home/utkarsh/utk_reboot/python/graphTheory/kruskalTestCase.txt",'r')
if __name__ == "__main__":
n, m = f1.readline().strip().split(' ')
n, m = [int(n), int(m)]
graph = Graph(n)
for edges_i in xrange(m):
x,y,r = map(int,f1.readline().strip().split(' '))
graph.addEdge(x,y,r)
#graph.addEdgemap(x,y,r)
result = mst02(n, graph)
print result
"""
ans = None
def dfs(s, graph, visited):
    """Post-order DFS over graph.graphmap starting at vertex s.

    Returns the number of vertices strictly below s in the DFS tree.
    Side effect: for the tree edge into each child subtree of size t,
    adds t * (graph.vertices - t) to the module-level tally `ans` at the
    index stored as the edge's data — the number of vertex pairs whose
    connecting path crosses that edge.
    """
    visited.add(s)
    below = 0
    for nxt, idx in graph.graphmap[s]:
        if nxt in visited:
            continue
        size = dfs(nxt, graph, visited) + 1
        ans[idx] += size * (graph.vertices - size)
        below += size
    return below
if __name__ == "__main__":
    # Read "<num vertices> <num edges>" from stdin.
    n, m = sys.stdin.readline().strip().split(' ')
    n, m = [int(n), int(m)]
    # ans[i] tallies how many vertex pairs use the MST edge whose stored
    # weight (exponent) is i on their connecting path.
    ans = [0]*(m*2)
    graph = Graph(n)
    for edges_i in xrange(m):
        x,y,r = map(int,sys.stdin.readline().strip().split(' '))
        graph.addEdge(x,y,r)
    # Build the MST adjacency map; edge weights act as powers of two.
    result = mst02(n, graph)
    visited = set()
    dfs(1,graph,visited)
    # Carry-propagate so ans holds binary digits of sum(2**i * ans[i]).
    # (Python 2: "/" below is integer division.)
    for i in range(len(ans)-1):
        ans[i+1] += ans[i]/2
        ans[i] %= 2
        #print "New:",ans
    # Emit the binary number, most significant digit first, skipping
    # leading zeros.
    flag = False
    real_ans = []
    for i in range(len(ans) - 1, -1, -1):
        if flag or ans[i] > 0:
            flag = True
            real_ans.append(ans[i])
    print ''.join(map(str,real_ans))
    #"""
| [
"utkarshdwivedi394@gmail.com"
] | utkarshdwivedi394@gmail.com |
b7a40c783d085a9b264fa7bfc44385ec9a4ddcef | 04a40566d258744d6dc2b361debbb41215206cbe | /01-resnets/code/data_loader/data_loader_util.py | b8c2baef8951cecf6efdd8258390b1ee9f2e7556 | [] | no_license | keivanB/papers | ec1fec83bb9be37d82129951d4f4188c4fce40c2 | c8cd485a1b1c0697a4a4734bb6a2a32fd9c93a5d | refs/heads/master | 2020-04-01T07:38:33.225626 | 2018-11-27T16:36:07 | 2018-11-27T16:36:07 | 152,997,498 | 0 | 0 | null | 2018-10-14T16:59:12 | 2018-10-14T16:59:11 | null | UTF-8 | Python | false | false | 1,618 | py | import torch.utils.data as data
import torchvision
import torchvision.transforms as transforms
def get_cifar_loaders(batch_size, num_workers):
    """Build the CIFAR-10 `DataLoader`s for this experiment.

    Args:
        batch_size: Batch size used by both loaders.
        num_workers: Worker count used by both loaders.

    Returns:
        train_loader, test_loader, classes: the two data loaders plus the
        tuple of CIFAR-10 class names.
    """
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # Training pipeline: flip/crop augmentation, then tensor conversion
    # and channel-wise normalization.
    augmented_pipeline = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, 4),
        transforms.ToTensor(),
        normalize,
    ])
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=augmented_pipeline)
    train_loader = data.DataLoader(train_set, batch_size=batch_size,
                                   shuffle=True, num_workers=num_workers)

    # Evaluation pipeline: no augmentation, just tensor + normalize.
    plain_pipeline = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=plain_pipeline)
    test_loader = data.DataLoader(test_set, batch_size=batch_size,
                                  shuffle=False, num_workers=num_workers)

    return train_loader, test_loader, classes
| [
"chutechristopher@gmail.com"
] | chutechristopher@gmail.com |
b2a6e7a0c802dc945e7bd16c610a6b84d3c1a187 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2645/60694/283292.py | 0a6b73a41973925581f90ae9cd96a723eef19d17 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | import math
def eatable(K):
return sum(math.ceil(p//K) for p in piles) <= H
piles = eval(input())
H = int(input())
piles.sort()
n = len(piles)
lo, hi = 1, piles[-1]
while lo < hi:
mi = (lo + hi) // 2
if not eatable(mi):
lo = mi + 1
else:
hi = mi
print(lo)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
146f1bd15ca0c981c0fdfab12e8f0022dc4b8bed | 181e9cc9cf4e52fcc6e9979890cc5b41e7beb756 | /Module 2/8/optical_flow.py | 07265bd486c096847a83b24b7b2105c54bd53c09 | [
"MIT"
] | permissive | PacktPublishing/OpenCV-Computer-Vision-Projects-with-Python | ace8576dce8d5f5db6992b3e5880a717996f78cc | 45a9c695e5bb29fa3354487e52f29a565d700d5c | refs/heads/master | 2023-02-09T14:10:42.767047 | 2023-01-30T09:02:09 | 2023-01-30T09:02:09 | 71,112,659 | 96 | 72 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | import cv2
import numpy as np
def start_tracking():
    """Track feature points from the default webcam with Lucas-Kanade
    optical flow, drawing the tracked trails until ESC is pressed."""
    cap = cv2.VideoCapture(0)
    scaling_factor = 0.5        # shrink frames to half size for speed
    num_frames_to_track = 5     # max trail length kept per feature
    num_frames_jump = 2         # re-detect features every this many frames
    tracking_paths = []         # list of trails; each trail is a list of (x, y)
    frame_index = 0
    # Pyramidal LK parameters: search window, pyramid depth, stop criteria.
    tracking_params = dict(winSize = (11, 11), maxLevel = 2,
                criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    while True:
        ret, frame = cap.read()
        frame = cv2.resize(frame, None, fx=scaling_factor,
                fy=scaling_factor, interpolation=cv2.INTER_AREA)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        output_img = frame.copy()
        if len(tracking_paths) > 0:
            prev_img, current_img = prev_gray, frame_gray
            # Last known position of every tracked feature.
            feature_points_0 = np.float32([tp[-1] for tp in tracking_paths]).reshape(-1, 1, 2)
            # Forward flow (prev -> current), then backward flow
            # (current -> prev) for a round-trip consistency check.
            feature_points_1, _, _ = cv2.calcOpticalFlowPyrLK(prev_img, current_img, feature_points_0,
                None, **tracking_params)
            feature_points_0_rev, _, _ = cv2.calcOpticalFlowPyrLK(current_img, prev_img, feature_points_1,
                None, **tracking_params)
            # A point is "good" if going forward then back lands within
            # 1 pixel of where it started.
            diff_feature_points = abs(feature_points_0-feature_points_0_rev).reshape(-1, 2).max(-1)
            good_points = diff_feature_points < 1
            new_tracking_paths = []
            for tp, (x, y), good_points_flag in zip(tracking_paths,
                feature_points_1.reshape(-1, 2), good_points):
                if not good_points_flag:
                    continue
                # Extend the trail, keeping only the most recent points.
                tp.append((x, y))
                if len(tp) > num_frames_to_track:
                    del tp[0]
                new_tracking_paths.append(tp)
                cv2.circle(output_img, (x, y), 3, (0, 255, 0), -1)
            tracking_paths = new_tracking_paths
            cv2.polylines(output_img, [np.int32(tp) for tp in tracking_paths], False, (0, 150, 0))
        if not frame_index % num_frames_jump:
            # Periodically look for fresh features, masking out a small
            # disc around every feature that is already being tracked.
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tp[-1]) for tp in tracking_paths]:
                cv2.circle(mask, (x, y), 6, 0, -1)
            feature_points = cv2.goodFeaturesToTrack(frame_gray, mask = mask, maxCorners = 500,
                qualityLevel = 0.3, minDistance = 7, blockSize = 7)
            if feature_points is not None:
                for x, y in np.float32(feature_points).reshape(-1, 2):
                    tracking_paths.append([(x, y)])
        frame_index += 1
        prev_gray = frame_gray
        cv2.imshow('Optical Flow', output_img)
        # ESC (key code 27) exits the loop.
        c = cv2.waitKey(1)
        if c == 27:
            break
if __name__ == '__main__':
    # Run the tracker until ESC, then tear down the OpenCV windows.
    start_tracking()
    cv2.destroyAllWindows()
| [
"prasadr@packtpub.com"
] | prasadr@packtpub.com |
06a013d515063e5e111d708abaec37946b034460 | e953679220ff59b58eb964b97a98ef026283c8e6 | /Ch23/stack.py | 1791c00c369728cdaf25f18088eef294ec44fbf4 | [] | no_license | lhy0807/A2CS | 9e440b85b53c79eb0367f3c478f866911422b8d8 | 6d793c1cc4989b123ba8ff1676e376681531c7d2 | refs/heads/master | 2021-04-15T06:10:36.178244 | 2018-03-23T02:54:55 | 2018-03-23T02:54:55 | 125,968,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | '''
Stack and Queue Demo
Tianhe Zhang S3C2
'''
nullPtr = -1
class Node(object):
    """One slot of the preallocated node array: a payload plus the index
    of the next slot."""

    def __init__(self, data, ptr):
        self.data = data  # stored value (None for a free slot)
        self.ptr = ptr    # index of the next node in the chain
class Stack(object):
    """Fixed-capacity stack backed by a preallocated array of linked Nodes.

    `startPtr` indexes the top node (nullPtr when empty) and `freePtr`
    heads the free-slot chain (nullPtr when full).
    """
    def __init__(self, space):
        self.space = space
        self.startPtr = nullPtr
        self.freePtr = 0
        self.record = []
        # Chain every slot into the free list: slot i points at slot i+1.
        for i in range(space):
            newNode = Node(None, i+1)
            self.record += [newNode]
        # Terminate the free chain at the last slot.
        self.record[-1].ptr = nullPtr
    def push(self, value):
        """Move the head of the free chain onto the top of the stack.

        Raises IndexError when the stack is full; previously a full-stack
        push silently wrote into record[-1] and corrupted the last node.
        """
        if self.freePtr == nullPtr:
            raise IndexError('push onto full stack')
        # The original's empty/non-empty branches were identical, so they
        # collapse into one path: remember the old top, claim a free slot,
        # store the value, and link the new top back to the old one.
        preStartPtr = self.startPtr
        self.startPtr = self.freePtr
        self.freePtr = self.record[self.freePtr].ptr
        self.record[self.startPtr].data = value
        self.record[self.startPtr].ptr = preStartPtr
    def pop(self):
        """Remove and return the top value, recycling its slot.

        Raises IndexError when the stack is empty; previously an
        empty-stack pop silently indexed record[-1].
        """
        if self.startPtr == nullPtr:
            raise IndexError('pop from empty stack')
        tempStart = self.startPtr
        value = self.record[tempStart].data
        self.record[tempStart].data = None
        # Return the slot to the head of the free chain.
        tempFree = self.freePtr
        self.freePtr = tempStart
        self.startPtr = self.record[self.startPtr].ptr
        self.record[tempStart].ptr = tempFree
        return value
    def showNodes(self):
        """Print the stack's values from top to bottom, one per line."""
        tempPtr = self.startPtr
        while (tempPtr != nullPtr):
            print(self.record[tempPtr].data)
            tempPtr = self.record[tempPtr].ptr
| [
"lihongyu0807@icloud.com"
] | lihongyu0807@icloud.com |
14fd5e3a891a56b1846fc53491c98caed781cdc1 | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/kubernetes/__init__.py | 7dc22994fecd6a57e8fc2bbf75a8912e2444696e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__project__ = 'kubernetes'
# The version is auto-updated. Please do not edit.
__version__ = "10.0.0"
import kubernetes.client
import kubernetes.config
import kubernetes.watch
import kubernetes.stream
import kubernetes.utils
| [
"robert.fujara@gmail.com"
] | robert.fujara@gmail.com |
ac36c8ce5dfd392dbb60fc56b074198f4aaf3ad3 | 1985e545df5ddfee396e87af6501fe517661cc77 | /bin/make_slides_latex | 0ab0b3d4da608fbd2545f1575110858b17767c0a | [
"MIT"
] | permissive | blester125/dotfiles | 46e657966582ba0b4552317107c85a44426ce9fd | 03b6856552040246b4d60330d0af6f37b440024d | refs/heads/master | 2023-07-24T11:13:11.989638 | 2023-07-12T13:30:15 | 2023-07-12T14:50:18 | 127,983,262 | 1 | 0 | null | 2022-02-12T23:09:41 | 2018-04-04T00:10:30 | Emacs Lisp | UTF-8 | Python | false | false | 1,866 | #!/usr/bin/python3
import os
import argparse
import textwrap
from datetime import datetime
def slides(title, author, affiliation, month, day, year):
return textwrap.dedent(r"""
\documentclass{beamer}
\usefonttheme[onlymath]{serif}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{array}
\usepackage{graphicx}
\usepackage{mathtools}
\usepackage{minted}
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
linkcolor=blue,
}
\usemintedstyle{manni}
\newminted{python}{fontsize=\footnotesize}
\usetheme{Pittsburgh}
\usepackage{pgfpages}
\setbeamertemplate{note page}{\pagecolor{yellow!5}\insertnote}
\setbeameroption{show notes on second screen=right}
\title{%s}
\author{%s}
\institute{%s}
\date{%s, %s, %s}
\def\R{\mathbb{R}}
\begin{document}
\frame{\titlepage}
\end{document}
""".lstrip("\n")) % (title, author, affiliation, month, day, year)
def main():
d = datetime.today()
parser = argparse.ArgumentParser()
parser.add_argument("--base-dir", "--base_dir", default="slides")
parser.add_argument("--slide-file", "--slide_file", default="slides.tex")
parser.add_argument("--title", required=True)
parser.add_argument("--author", default="Brian Lester")
parser.add_argument("--affiliation", default="")
parser.add_argument("--month", default=d.strftime("%B"))
parser.add_argument("--day", default=str(d.day))
parser.add_argument("--year", default=str(d.year))
args = parser.parse_args()
if not os.path.exists(args.base_dir):
os.makedirs(args.base_dir)
with open(os.path.join(args.base_dir, args.slide_file), "w") as f:
f.write(slides(args.title, args.author, args.affiliation, args.month, args.day, args.year))
if __name__ == "__main__":
main()
| [
"blester125@gmail.com"
] | blester125@gmail.com | |
2d8ee210a856ac054047f076257cc2193f4eae69 | 8a2083328c59b67a08c7c1b76568ac62ae632628 | /W6/api_service/urls.py | 79e63752be5e1e7debbbb5cdccfb964018397fa2 | [] | no_license | kasra-najafi/BootCampTabestan99 | 8a5964af2aa5033140d1c1b85fc0fdbc13f68683 | c8de2b7f1eead7a5a4c6a7e054b110b6c5aed5ca | refs/heads/master | 2022-12-12T22:01:25.868417 | 2020-09-10T14:22:21 | 2020-09-10T14:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | from django.urls import path, include
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
router = routers.DefaultRouter()
# router.register('movie', views.MovieView)
# router.register('salon', views.SalonView)
# router.register('cinema', views.CinemaView)
urlpatterns = [
# path('', include(router.urls)),
path('salon', views.SalonList.as_view(), name='salon'),
path('salon/<int:pk>', views.SalonDetail.as_view(), name='salon-detail'),
path('movies', views.MovieView.as_view()),
path('hello', views.hello),
path('comments', views.CommentList.as_view(), name='comment-list'),
path('comments/<int:pk>', views.CommentDetail.as_view(), name='comment-detail'),
path('articles', views.ArticleList.as_view(), name='article-list'),
path('articles/<int:pk>', views.ArticleDetail.as_view(), name='article-detail'),
]
# urlpatterns = format_suffix_patterns(urlpatterns)
| [
"teghfo@gmail.com"
] | teghfo@gmail.com |
2dc741ed1a74d1acd37a4d2bcb5fb0f0da822a6d | 79e1a5ad019b261034bc6338e894679d3f5d54d9 | /Search in Rotated Sorted Array.py | 7577b548b7c5f1d0afbfd732119ed546f960924d | [
"MIT"
] | permissive | ngdeva99/Fulcrum | c615f457ec34c563199cc1dab243ecc62e23ad0b | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | refs/heads/master | 2022-12-15T19:35:46.508701 | 2020-09-09T06:47:48 | 2020-09-09T06:48:08 | 294,027,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
# worst soln O(n)
if target in nums:
return nums.index(target)
else:
return -1
| [
"31466229+ngdeva99@users.noreply.github.com"
] | 31466229+ngdeva99@users.noreply.github.com |
3bae37ab2be5d5df876579debb8c5b2d84567824 | 556da038494ad93b03923577b48f89dd6d70fb48 | /1123 Is It a Complete AVL Tree.py | b80514a01f9fe79cd448a85d7dce05e9f4a1f466 | [] | no_license | junyechen/PAT-Advanced-Level-Practice | f5c9f604c458965c2165960aaac714f69ce1057b | 401c9d3040a0273c0e2461c963b781bcebd33667 | refs/heads/master | 2020-06-19T10:55:19.564725 | 2020-05-12T10:21:14 | 2020-05-12T10:21:14 | 196,684,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,964 | py | """
An AVL tree is a self-balancing binary search tree. In an AVL tree, the heights of the two child subtrees of any node differ by at most one; if at any time they differ by more than one, rebalancing is done to restore this property. Figures 1-4 illustrate the rotation rules.
F1.jpg F2.jpg
F3.jpg F4.jpg
Now given a sequence of insertions, you are supposed to output the level-order traversal sequence of the resulting AVL tree, and to tell if it is a complete binary tree.
Input Specification:
Each input file contains one test case. For each case, the first line contains a positive integer N (≤ 20). Then N distinct integer keys are given in the next line. All the numbers in a line are separated by a space.
Output Specification:
For each test case, insert the keys one by one into an initially empty AVL tree. Then first print in a line the level-order traversal sequence of the resulting AVL tree. All the numbers in a line must be separated by a space, and there must be no extra space at the end of the line. Then in the next line, print YES if the tree is complete, or NO if not.
Sample Input 1:
5
88 70 61 63 65
Sample Output 1:
70 63 88 61 65
YES
Sample Input 2:
8
88 70 61 96 120 90 65 68
Sample Output 2:
88 65 96 61 70 90 120 68
NO
"""
#####################################
"""
再次温故了AVL树,复习的时间就比之间预习的时间短很多
但是对python的类的写法不熟悉,花了很久时间,最终还是参考了别人的代码
其中对于节点height的设定比较巧妙,默认是0(叶节点),如果是None则是-1
由于数据量非常小,所以都能AC通过
"""
#####################################
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
self.height = 0
class AVLTree:
def __init__(self):
self.root = None
def height(self, root):
if root is None:
return -1
else:
return root.height
def check_balance(self, root):
return self.height(root.left) - self.height(root.right)
def right_rotation(self, root):
r = root.left
root.left = r.right
r.right = root
root.height = max(self.height(root.left), self.height(root.right)) + 1
return r
def left_rotation(self, root):
r = root.right
root.right = r.left
r.left = root
root.height = max(self.height(root.left), self.height(root.right)) + 1
return r
def insertion(self, node):
if self.root is None:
self.root = node
else:
self.root = self.insertion_(node, self.root)
def insertion_(self, node, root):
if root is None:
root = node
elif node.value < root.value:
root.left = self.insertion_(node, root.left)
if self.check_balance(root) == 2:
if self.check_balance(root.left) == 1:
root = self.right_rotation(root)
else:
root.left = self.left_rotation(root.left)
root = self.right_rotation(root)
root.height = max(self.height(root.left), self.height(root.right)) + 1
else:
root.right = self.insertion_(node, root.right)
if self.check_balance(root) == -2:
if self.check_balance(root.right) == -1:
root = self.left_rotation(root)
else:
root.right = self.right_rotation(root.right)
root = self.left_rotation(root)
root.height = max(self.height(root.left), self.height(root.right)) + 1
return root
def in_order(self):
if self.root is None:
return []
else:
res = wait_que = [self.root]
flag = True
num = 1
while wait_que:
temp = []
for node in wait_que:
if node.left is not None:
temp.append(node.left)
if flag:
num += 1
else:
if flag:
flag = False
if node.right is not None:
temp.append(node.right)
if flag:
num += 1
else:
if flag:
flag = False
wait_que = temp
res += wait_que
if num == len(res):
flag = True
else:
flag = False
return res + [flag]
n = int(input())
numbers = list(map(int, input().split()))
avl_tree = AVLTree()
for i in range(n):
avl_tree.insertion(Node(numbers[i]))
res = avl_tree.in_order()
print(' '.join(map(str, [x.value for x in res[:-1]])))
if res[-1]:
print('YES')
else:
print('NO')
| [
"chenjunyeword@outlook.com"
] | chenjunyeword@outlook.com |
000d8b674f9d6bf558f2ad9bbcfec27a3c8c5e5d | 00b2483123c0dac4483725eed199430b237696b4 | /template_utils/templatetags/philterz.py | 49d5ec0404853b5eca8407ca1a6612e526380ea8 | [
"BSD-3-Clause"
] | permissive | springmerchant/django-template-utils | 4f50c76cd76c9571f587138284d64fd818ccc1d7 | a280bfc73e70426c354af4fd6f34cb65edeb61dd | refs/heads/master | 2021-01-18T07:47:04.369761 | 2009-10-29T17:59:23 | 2009-10-29T17:59:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | from django import template
from template_utils import filters
register = template.Library()
for name,filter_func in filters.items():
register.filter(name, filter_func) | [
"justquick@gmail.com"
] | justquick@gmail.com |
b72c61790768caf7ddd40e925a18c490f701c2c2 | 9947d1e328a3262a35a61385dc537c3dc557ab7d | /数据分析/day05/demo07_sjph.py | 8b3ed1c199bf87a6be6eb992af2684f9b6ed937e | [] | no_license | nuass/lzh | d0a7c74a3295523d1fe15eeaa73997fc04469f06 | 3cb1cf1e448b88ade226d113a7da4eab7bbb5c09 | refs/heads/master | 2021-02-06T06:10:32.772831 | 2019-06-10T08:54:49 | 2019-06-10T08:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,633 | py | # coding=utf-8
"""
数据平滑
1、计算两只股票的收益率曲线并绘制
2、分析曲线形状,确定投资策略
"""
import numpy as np
import matplotlib.pyplot as mp
import datetime as dt
import matplotlib.dates as md
def dmy2ymd(dmy):
#把二进制字符串转为普通字符串
dmy = str(dmy,encoding='utf-8')
t=dt.datetime.strptime(dmy,'%d-%m-%Y')
s=t.date().strftime('%Y-%m-%d')
return s
dates,val_closing_price=np.loadtxt("../da_data/vale.csv",
usecols=(1,6),
unpack=True,
dtype='M8[D],f8',
delimiter=',',
converters={1:dmy2ymd})
bhp_closing_price=np.loadtxt("../da_data/bhp.csv",
usecols=(6,),
unpack=True,
dtype='f8',
delimiter=',')
#绘制收盘价的折现图
mp.figure("PolyFit",facecolor='lightgray')
mp.title("PolyFit",fontsize=14)
mp.xlabel("Date",fontsize=12)
mp.ylabel("Price",fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
#设置刻度定位器
ax=mp.gca()
#设置朱刻度定位器-每周一一个主刻度
major_loc = md.WeekdayLocator(byweekday=md.MO)
ax.xaxis.set_major_locator(major_loc)
ax.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))
#设置次刻度定位器为日定位器
minor_loc = md.DayLocator()
ax.xaxis.set_minor_locator(minor_loc)
dates=dates.astype(md.datetime.datetime)
#计算收益率
bhp_returns = np.diff(bhp_closing_price)/bhp_closing_price[:-1]
vale_returns = np.diff(val_closing_price)/val_closing_price[:-1]
dates = dates[:-1]
mp.plot(dates,bhp_returns,c='dodgerblue',label='bhp_returns',alpha=0.3)
mp.plot(dates,vale_returns,c='orangered',label='vale_returns',alpha=0.3)
#卷积降噪
kernel = np.hanning(8)
kernel /=kernel.sum()
print(kernel)
bhp=np.convolve(bhp_returns,kernel,'valid')
vale = np.convolve(vale_returns,kernel,'valid')
#针对vale与bhp分别做多项式
days = dates[7:].astype('M8[D]').astype('int32')
P_vale=np.polyfit(days,vale,3)
P_bhp=np.polyfit(days,bhp,3)
y_vale=np.polyval(P_vale,days)
y_bhp = np.polyval(P_bhp,days)
mp.plot(dates[7:],y_vale,c="orangered",label="vale_convolved")
mp.plot(dates[7:],y_bhp,c="dodgerblue",label="bhp_convolved")
#求多个多项式的交点位置
P=np.polysub(P_vale,P_bhp)
xs = np.roots(P)
dates = np.floor(xs).astype('M8[D]')
print(dates)
mp.legend()
mp.gcf().autofmt_xdate()
mp.show()
mp.legend()
mp.gcf().autofmt_xdate()
mp.show() | [
"1581627402@qq.com"
] | 1581627402@qq.com |
2a1c018f6703f1b3ca0e7dd870682aa996c0bb44 | f5d77defeaa244ed8df517d13f21cd6f073adebc | /programas/menuMatricesBien.py | a6965b5713951995d7683434d8a5d834feca9404 | [] | no_license | lizetheP/PensamientoC | 02a02cf6d537e1637a933a4f3957995f6690d7d6 | 5d5740e0afa4fc487fdc5f2c466df63e9b4a664f | refs/heads/master | 2023-08-10T08:07:09.604983 | 2023-08-08T16:53:10 | 2023-08-08T16:53:10 | 200,893,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | def main():
r = int(input("Introduce el número de renglones: "))
c = int(input("Introduce el número de columnas: "))
m = creaMatriz(r, c)
continua = True
while continua == True:
menu()
opcion = int(input("Introduce una opcion: "))
if opcion == 1:
imprime_matriz(m)
elif opcion == 2:
imprime_matriz(m)
res = promedio_matriz(m)
print("El promedio es %.1f" % res)
elif opcion == 3:
# COMPLETAR
elif opcion == 4:
imprime_matriz(m)
posicion_pares(m)
elif opcion == 5:
# COMPLETAR
elif opcion == 6:
print("Adiós")
continua = False
else:
print("ERROR OPCION INVALIDA")
main()
| [
"lizetheperez@gmail.com"
] | lizetheperez@gmail.com |
7198a324184785aebf8480881a3c40c2bfd83147 | ebcea394905df8222c257c8c6c469627a6e48095 | /PyQt5/object_detection/builders/region_similarity_calculator_builder_test.py | 4e14556344509ed5159f7f7b24ff599b469ec41d | [] | no_license | valiok98/Python-Qt5-Tensorflow | 2773cfc2a0e569ed53cf3d90066885f17abe8c6a | e03ccc2884b687a36fbe47f5ff320837be3e217a | refs/heads/master | 2021-09-17T20:41:01.908602 | 2018-03-31T12:42:25 | 2018-03-31T12:42:25 | 103,644,683 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,639 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for region_similarity_calculator_builder."""
import tensorflow as tf
from google.protobuf import text_format
import sys
sys.path.append("..")
from builders import region_similarity_calculator_builder
from core import region_similarity_calculator
from protos import region_similarity_calculator_pb2 as sim_calc_pb2
class RegionSimilarityCalculatorBuilderTest(tf.test.TestCase):
def testBuildIoaSimilarityCalculator(self):
similarity_calc_text_proto = """
ioa_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
self.assertTrue(isinstance(similarity_calc,
region_similarity_calculator.IoaSimilarity))
def testBuildIouSimilarityCalculator(self):
similarity_calc_text_proto = """
iou_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
self.assertTrue(isinstance(similarity_calc,
region_similarity_calculator.IouSimilarity))
def testBuildNegSqDistSimilarityCalculator(self):
similarity_calc_text_proto = """
neg_sq_dist_similarity {
}
"""
similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge(similarity_calc_text_proto, similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
similarity_calc_proto)
self.assertTrue(isinstance(similarity_calc,
region_similarity_calculator.
NegSqDistSimilarity))
if __name__ == '__main__':
tf.test.main()
| [
"valentin1998v@gmail.com"
] | valentin1998v@gmail.com |
7d7b52fdc14896dc450acd885abaade312c79314 | c099611e42319053888a747ea78468224e45a725 | /Polar-slepian/V_28/polarfilesim_FERvsp_FRon512_0e01.py | 729cea36803ed8b4a701ec961cad512d458c71d4 | [] | no_license | sbsoumya/PolarProject-Code_Res | 118f54593716520c71cdc0e479236ffdc1a94f89 | 12a3b6fb24cf8160a519c74b064fd845066cbe0b | refs/heads/master | 2021-06-27T21:04:41.057937 | 2019-03-22T20:56:44 | 2019-03-22T20:56:44 | 129,615,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | #-------------------------------------------------------------------------------
# Name: polarchannelsim_FERvsR_derate.py
# Purpose: FER VS R simulation for given code and different rates
# p
# Author: soumya
#
# Created: 19/08/2017
#----------------------------------------
import numpy as np
import math as ma
import problib as pl
import polarencdec as ec
import polarconstruct as pcon
from datetime import datetime
import json
import polarfile as pf
from pprint import pprint
from timeit import default_timer as timer
#=================================================================simulation
#------------Number of good channels = capacity
start = timer()
Nlist=[512]
N=Nlist[0]
channel_plist=list(np.linspace(0.01,0.2,20))
design_plist=channel_plist
L=0.01
Lexp=np.log10(L)
Rlist=[len(pcon.getGChZCL(p,N,Lexp)[0]) for p in design_plist]
print Rlist
print design_plist
runsim=1000
start=timer()
stamp=datetime.now().strftime("%y-%m-%d_%H-%M-%S")
filename="./simresults/polarfile_FERvsp_FR"+str(L).replace(".","e")+"in"+str(N)+"_"+stamp+".txt"
f1=open(filename,'w')
print filename
print "P Vs FER REPORT derate"
print "---------------------------"
print "N="+str(N)
print "sim ran :"+str(runsim)
json.dump( "P Vs FER REPORT derate",f1) ;f1.write("\n")
json.dump( "---------------------------",f1) ;f1.write("\n")
json.dump( "N="+str(N),f1) ;f1.write("\n")
json.dump("sim ran :"+str(runsim),f1) ;f1.write("\n")
FER=[];
start=timer()
for i in range(len(design_plist)):
print design_plist[i];
print Rlist[i]
block_error=pf.polarfilesim_FR(N,design_plist[i],design_plist[i],Rlist[i],runsim,False)
#~ if block_error==0:
#block_error=pch.polarchannelsim_FR(N,design_p,design_p,msg_length,runsim,False)
FER.append(block_error)
print "Z max :"+str(L)
block_error_exp=np.log10(FER).tolist()
print design_plist
print Rlist
print block_error_exp
json.dump("Z max :"+str(L),f1) ;f1.write("\n")
json.dump( "Rate vs Block_error=",f1) ;f1.write("\n")
json.dump(design_plist,f1) ;f1.write("\n")
json.dump(Rlist,f1) ;f1.write("\n")
json.dump(block_error_exp,f1) ;f1.write("\n")
end = timer()
TC=(end-start)
print "Time taken:"+str(TC)
json.dump("Time taken:"+str(TC) ,f1) ;f1.write("\n")
| [
"soumya.s.banerjee17@gmail.com"
] | soumya.s.banerjee17@gmail.com |
b28f86009c545dda8122ea50e738ce26a7a8bf8f | 96b828a27ef829d11c488fcb3a918f8a45cf5c6c | /docs/code/answers/geocoder_cli_example/cli.py | d49440157653aa2c112ea957e36e87be207dee1e | [] | no_license | stanfordjournalism/compciv-2017-classsite | 07071ad1fb3c23d8d86089c992e3b695b4b7334a | 1b5b243db9130aa480cb147cd5dc6c1726e438e3 | refs/heads/master | 2021-01-12T01:51:24.202423 | 2017-03-04T02:19:20 | 2017-03-04T02:19:20 | 78,438,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | from argparse import ArgumentParser
from geocoder import geocode
import json
if __name__ == '__main__':
parser = ArgumentParser(description='Use Mapzen to geocode a location')
parser.add_argument('location', type=str,
help='A human-readable description of your location/address')
parser.add_argument('api_key', type=str, help='Your Mapzen API key')
args = parser.parse_args()
mapzen_result = geocode(api_key=args.api_key, location_name=args.location)
if not mapzen_result:
print("Sorry, could not geocode the location:", args.location)
else:
# print dictionary as a prettified JSON
txt = json.dumps(mapzen_result, indent=2)
print(txt)
| [
"dansonguyen@gmail.com"
] | dansonguyen@gmail.com |
788fc853a9bc6dd38382b3aa2c23a05ca47f8059 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/network/v20200601/get_virtual_network_gateway_advertised_routes.py | 12615dcb3e30dd09a0cc7d5046011e105f5e53b0 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 2,718 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayAdvertisedRoutesResult',
'AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult',
'get_virtual_network_gateway_advertised_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayAdvertisedRoutesResult:
"""
List of virtual network gateway routes.
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.GatewayRouteResponseResult']]:
"""
List of gateway routes.
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(GetVirtualNetworkGatewayAdvertisedRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayAdvertisedRoutesResult(
value=self.value)
def get_virtual_network_gateway_advertised_routes(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult:
"""
Use this data source to access information about an existing resource.
:param str peer: The IP address of the peer.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200601:getVirtualNetworkGatewayAdvertisedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayAdvertisedRoutesResult).value
return AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(
value=__ret__.value)
| [
"noreply@github.com"
] | test-wiz-sec.noreply@github.com |
ecf53884239bea0f047927f8f693da61a51e6354 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_19221.py | ff1b91bdfc966fd232578587be239b172029434a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # Dynamically query a subset of columns in sqlalchemy
columns = ['id', 'name']
print session.query(select(from_obj=User, columns=columns)).all()
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
4fd0458346cfc1f300a6b78b7ba03809bf1c3d3f | ec931947aa3e06ce565637e7ee1cb707f56375a2 | /aoc2018/day6/day6_part2.py | fb8337d87afdfcb2240afef7c44b2166da73dd16 | [] | no_license | chrisglencross/advent-of-code | 5f16ed7e2265d27ce15f502ce2a1c2f11fc99fc0 | 21623d4aa01a9e20285a0233c50f8f56c4099af5 | refs/heads/master | 2023-01-24T22:01:30.829679 | 2023-01-12T23:03:03 | 2023-01-12T23:03:03 | 224,833,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | with open("input", "r") as f:
lines = f.readlines()
coords = []
for line in lines:
vals = line.split(",")
coords.append(tuple([int(vals[0]), int(vals[1])]))
min_x = min(coords, key=lambda coord: coord[0])[0] - 1
max_x = max(coords, key=lambda coord: coord[0])[0] + 1
min_y = min(coords, key=lambda coord: coord[1])[1] - 1
max_y = max(coords, key=lambda coord: coord[1])[1] + 1
count = 0
for y in range(min_y, max_y + 1):
row = []
for x in range(min_x, max_x + 1):
total = 0
for coord in coords:
total = total + abs(coord[0] - x) + abs(coord[1] - y)
if total < 10000:
count = count + 1
print(count)
| [
"chris@glencross.org"
] | chris@glencross.org |
62ff4e83b1550dfb0cb3a8ebf0e60136e91b130b | e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a | /Systest/tests/acl-new/ACL_FUN_031.py | c7a7f4f52e9f3d9e171a0004cb7de3cba746a1d7 | [] | no_license | muttu2244/MyPython | 1ddf1958e5a3514f9605d1f83c0930b24b856391 | 984ca763feae49a44c271342dbc15fde935174cf | refs/heads/master | 2021-06-09T02:21:09.801103 | 2017-10-10T07:30:04 | 2017-10-10T07:30:04 | 13,803,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,529 | py | #!/usr/bin/env python2.5
"""
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
DESCRIPTION:To verify wheteher ssx is allowing traffic or not when applied acl deleated from context
TEST PLAN: Sanity Test plans
TEST CASES: ACL_FUN_031
TOPOLOGY DIAGRAM:
--------------------------------------------------------------------------------
| LINUX SSX
| Trans IP = 2.2.2.3/24 TransIP = 2.2.2.45/24 |
| eth1 Port 2/1 |
--------------------------------------------------------------------------------
AUTHOR:
REVIEWER:
HOW TO RUN : python ACL_FUN_031.py
"""
import sys, os, getopt
mydir = os.path.dirname(__file__)
qa_lib_dir = os.path.join(mydir, "../../lib/py")
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
# Frame-work libraries
from SSX import *
from Linux import *
from log import *
from StokeTest import test_case, test_suite, test_runner
from log import buildLogger
from logging import getLogger
from acl import *
from helpers import is_healthy
import re
#import configs file
from config import *
from topo import *
class test_ACL_FUN_031(test_case):
myLog = getLogger()
def setUp(self):
#Establish a telnet session to the SSX box.
self.ssx = SSX(ssx["ip_addr"])
self.linux=Linux(xpress_vpn['ip_addr'],xpress_vpn['user_name'],xpress_vpn['password'])
self.ssx.telnet()
self.linux.telnet()
# Clear the SSX config
self.ssx.clear_config()
# wait for card to come up
self.ssx.wait4cards()
self.ssx.clear_health_stats()
def tearDown(self):
# Close the telnet session of SSX
self.ssx.close()
self.linux.close()
def test_ACL_FUN_031(self):
#vgroup b/w ssx and host
#out = ssx["ip_addr"].split("-mc")[0]
#os.system("vgroup %s:%s %s:%s"%(out,ssx1_port,xpress_vpn['ip_addr'],xpress_vpn['interface']))
#configuring tunnel on linux machine
self.linux.configure_ip_interface(p1_ssx_xpressvpn[1], script_var['xpress_phy_iface1_ip_mask'])
self.myLog.output("==================Starting The Test====================")
# Push SSX config
self.ssx.config_from_string(script_var['ACL_FUN_031'])
#changing context and clearing ip counters
self.ssx.cmd("context %s" %(script_var['context_name']))
self.ssx.cmd("clear ip counters")
#ping operation
ping_op=self.ssx.ping(dest=script_var['xpress_phy_iface1_ip'])
self.failIfEqual(ping_op,1,"Ping Throught Interface Failed when ACL was applied")
self.ssx.cmd("show ip counters icmp")
output=ip_verify_ip_counters_icmp(self.ssx,total_tx='5',total='0',echo_request='5', echo_reply='0', unreachable='0', \
mask_request='0', mask_reply='0', source_quench= '0' , param_problem='0', timestamp='0',\
redirects='0', info_reply='0', ttl_expired='0', other='0')
self.failUnlessEqual(output,1,"Counters Increment Failed")
# Push SSX config
self.ssx.config_from_string(script_var['ACL_FUN_031-1'])
#changing context and clearing ip counters
self.ssx.cmd("context %s" %(script_var['context_name']))
self.ssx.cmd("clear ip counters")
#ping operation
ping_op=self.ssx.ping(dest=script_var['xpress_phy_iface1_ip'])
self.failIfEqual(ping_op,0,"Ping Throught Interface Failed when ACL was applied")
output=ip_verify_ip_counters_icmp(self.ssx,total_tx='5',total='5',echo_request='5', echo_reply='5', unreachable='0', \
mask_request='0', mask_reply='0', source_quench= '0' , param_problem='0', timestamp='0',\
redirects='0', info_reply='0', ttl_expired='0', other='0')
self.failUnlessEqual(output,0,"Counters Increment Failed")
# Checking SSX Health
hs = self.ssx.get_health_stats()
self.failUnless(is_healthy( hs), "Platform is not healthy")
if __name__ == '__main__':
filename = os.path.split(__file__)[1].replace('.py','.log')
log = buildLogger(filename, debug=True, console=True)
suite = test_suite()
suite.addTest(test_ACL_FUN_031)
test_runner(stream = sys.stdout).run(suite)
| [
"muttu2244@yahoo.com"
] | muttu2244@yahoo.com |
39f8fa3b4b676a7f77f18b6dfd2d1da31e7f16ee | 400571adc11a8cbeefcf7c3fee84af78d7354c3d | /functionalprograms/harmonic.py | b9ee2c26835637bc63f903eba109f82f1236798d | [] | no_license | noothanprem/python | b91c0d65d436ad905301d163db115b87fdfaf4d2 | d736c2606ef2844b524889e2c4a8ded80fd1269d | refs/heads/master | 2020-07-21T08:21:47.187512 | 2019-09-16T13:42:39 | 2019-09-16T13:42:39 | 206,795,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | """
Name : Harmonic.py
Date : 26/08/2019
Purpose : Prints the Nth harmonic number: 1/1 + 1/2 + ... + 1/N
"""
from Utility import utility
while True:
try:
#Taking the input value from the user
n=int(input("Enter the harmonic value : "))
harm=0.0
#calling the function in the BL file
harm=utility.harmonic(n)
print("Harmonic value : ",harm)
break
except ValueError:
print("Please enter number only")
| [
"you@example.com"
] | you@example.com |
091444136782e22cecfc963d232c079c5d4e98a9 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2021_06_01_preview/aio/_app_platform_management_client.py | 70ef58681d10a97647256b465412b959decc02cc | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 7,416 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import AppPlatformManagementClientConfiguration
from .operations import (
AppsOperations,
BindingsOperations,
CertificatesOperations,
ConfigServersOperations,
CustomDomainsOperations,
DeploymentsOperations,
MonitoringSettingsOperations,
Operations,
RuntimeVersionsOperations,
ServicesOperations,
SkusOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""REST API for Azure Spring Cloud.
:ivar services: ServicesOperations operations
:vartype services: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ServicesOperations
:ivar config_servers: ConfigServersOperations operations
:vartype config_servers:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ConfigServersOperations
:ivar monitoring_settings: MonitoringSettingsOperations operations
:vartype monitoring_settings:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.MonitoringSettingsOperations
:ivar apps: AppsOperations operations
:vartype apps: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.AppsOperations
:ivar bindings: BindingsOperations operations
:vartype bindings: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.BindingsOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CertificatesOperations
:ivar custom_domains: CustomDomainsOperations operations
:vartype custom_domains:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CustomDomainsOperations
:ivar deployments: DeploymentsOperations operations
:vartype deployments:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.DeploymentsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.Operations
:ivar runtime_versions: RuntimeVersionsOperations operations
:vartype runtime_versions:
azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.RuntimeVersionsOperations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.SkusOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2021-06-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AppPlatformManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.services = ServicesOperations(self._client, self._config, self._serialize, self._deserialize)
self.config_servers = ConfigServersOperations(self._client, self._config, self._serialize, self._deserialize)
self.monitoring_settings = MonitoringSettingsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.apps = AppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.bindings = BindingsOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.custom_domains = CustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.runtime_versions = RuntimeVersionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.skus = SkusOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AppPlatformManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
50f6c375f06290a4bd62a1727f1a019e50dc1a71 | 388081b4b22f62920b4f33f8ee6a13ca389dc7c2 | /backend/pubsub.py | 8a393eecc4ca82498365f5f12b127b9aa47fa46e | [] | no_license | NishanthMHegde/CompleteBlockchain | 44e28a3f342539d11c8ac14861bf05fb6397d0aa | bba3c1ff3b4c10abc09e0233b9184b308450e9d5 | refs/heads/master | 2022-12-05T01:06:39.386223 | 2020-08-29T17:37:43 | 2020-08-29T17:37:43 | 281,154,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | from pubnub.pubnub import PubNub
from pubnub.pnconfiguration import PNConfiguration
from pubnub.callbacks import SubscribeCallback
from backend.blockchain.block import Block
from backend.wallet.transaction_pool import TransactionPool
from backend.wallet.transactions import Transactions
pnconfig = PNConfiguration()
pnconfig.subscribe_key = 'sub-c-97b26636-ce45-11ea-b3f2-c27cb65b13f4'
pnconfig.publish_key = 'pub-c-15161881-8829-4356-992a-5aeb9a798fa4'
CHANNELS = {
'TEST':'TEST',
'BLOCK': 'BLOCK',
'TRANSACTIONS': 'TRANSACTIONS'
}
class Listener(SubscribeCallback):
"""
Override the default listener message method to suit our requirements
"""
def __init__(self, blockchain, transaction_pool):
self.blockchain = blockchain
self.transaction_pool = transaction_pool
def message(self, pubnub, message_object):
print('Message channel: %s | Message object: %s' % (message_object.channel, message_object.message))
#check if a block was received through the BLOCK channel and then add it to the chaina and then perform replace chain
if message_object.channel == 'BLOCK':
block = Block.from_json(message_object.message)
potential_chain = self.blockchain.chain[:]
#add received block to the chain
potential_chain.append(block)
#perform replace_chain operation
try:
self.blockchain.replace_chain(potential_chain)
#After everytime a block is mined, we need to clear the transaction pool.
self.transaction_pool.clear_transaction(self.blockchain)
print("Chain replacement was successful")
except Exception as e:
print("Chain replacement was not successful: %s" % (e))
elif message_object.channel == 'TRANSACTIONS':
transaction = Transactions.from_json(message_object.message)
self.transaction_pool.set_transaction(transaction)
class PubSub():
"""
Class to handle publish/subscribe from PubNub.
Used to communicate between different blockchain peers.
"""
def __init__(self, blockchain, transaction_pool):
#initialize the pubnub object
self.pubnub = PubNub(pnconfig)
#subscribe to the channels that we need to listen to and receive data
self.pubnub.subscribe().channels(CHANNELS.values()).execute()
#add the listener to listen for incoming block data
self.pubnub.add_listener(Listener(blockchain, transaction_pool))
def publish(self, message, channel):
"""
Method to publish a message via a channel
"""
self.pubnub.publish().channel(channel).message(message).sync()
def broadcast_block(self, block):
"""
Method to broadcast the block in the form of JSON to all peers.
"""
self.publish(block.to_json(), CHANNELS['BLOCK'])
def broadcast_transaction(self, transaction):
"""
Method to broadcast the block in the form of JSON to all peers.
"""
self.publish(transaction.to_json(), CHANNELS['TRANSACTIONS'])
| [
"="
] | = |
a4627337713ae4cdab4c55b56f6553ba75d8ab09 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2847/60658/233839.py | 8769d41faecdf83e2f74e5762b014b1cf9ddda04 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | n = int(input())
li = [int(x) for x in input().split()]
start,end=[int(x) for x in input().split()]
print(sum(li[start-1:end-1]))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
8652f6feddf3c9d4300b09c18409285c0134dee0 | 0cf3a74494ed92bcdeaf0220ad44475c1b6b2c1c | /airflow/providers/tableau/operators/tableau_refresh_workbook.py | 306c3ded629bf75550db1664059f5e5ecd58ebda | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | bolkedebruin/airflow | 064a0c32dff267f586c3fd4dc4beaae12273d180 | e1fe30c70d0fe9c033db9daf9d4420f7fa815b2d | refs/heads/master | 2023-06-09T11:37:57.734560 | 2022-02-23T12:38:27 | 2022-02-23T12:38:27 | 38,505,714 | 2 | 7 | Apache-2.0 | 2022-03-01T01:22:16 | 2015-07-03T18:27:44 | Python | UTF-8 | Python | false | false | 3,305 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from typing import TYPE_CHECKING, Optional
from airflow.models import BaseOperator
from airflow.providers.tableau.operators.tableau import TableauOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
warnings.warn(
"""This operator is deprecated. Please use `airflow.providers.tableau.operators.tableau`.""",
DeprecationWarning,
stacklevel=2,
)
class TableauRefreshWorkbookOperator(BaseOperator):
"""
This operator is deprecated. Please use `airflow.providers.tableau.operators.tableau`.
Refreshes a Tableau Workbook/Extract
.. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#workbooks
:param workbook_name: The name of the workbook to refresh.
:param site_id: The id of the site where the workbook belongs to.
:param blocking: Defines if the job waits until the refresh has finished.
Default: True.
:param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
containing the credentials to authenticate to the Tableau Server. Default:
'tableau_default'.
:param check_interval: time in seconds that the job should wait in
between each instance state checks until operation is completed
"""
def __init__(
self,
*,
workbook_name: str,
site_id: Optional[str] = None,
blocking: bool = True,
tableau_conn_id: str = 'tableau_default',
check_interval: float = 20,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.workbook_name = workbook_name
self.site_id = site_id
self.blocking = blocking
self.tableau_conn_id = tableau_conn_id
self.check_interval = check_interval
def execute(self, context: 'Context') -> str:
"""
Executes the Tableau Extract Refresh and pushes the job id to xcom.
:param context: The task context during execution.
:return: the id of the job that executes the extract refresh
:rtype: str
"""
job_id = TableauOperator(
resource='workbooks',
method='refresh',
find=self.workbook_name,
match_with='name',
site_id=self.site_id,
tableau_conn_id=self.tableau_conn_id,
blocking_refresh=self.blocking,
check_interval=self.check_interval,
task_id='refresh_workbook',
dag=None,
).execute(context=context)
return job_id
| [
"noreply@github.com"
] | bolkedebruin.noreply@github.com |
d09b1e53f500c7525745b23ce47c8741da7d91ce | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_venturing.py | 6e28c8495a42a130b7dc59b180e4d8f4b4de0e52 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
from xai.brain.wordbase.verbs._venture import _VENTURE
#calss header
class _VENTURING(_VENTURE, ):
def __init__(self,):
_VENTURE.__init__(self)
self.name = "VENTURING"
self.specie = 'verbs'
self.basic = "venture"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
318cc033527dc0efcad1e745b383033a4a3fb1b3 | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /lc_previous/single-row-keyboard.py | d8aef3d4f8edad5a077c42b385d456661cbe57a9 | [] | no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | class Solution:
def calculateTime(self, keyboard: str, word: str) -> int:
temp = keyboard.find(word[0]) # 2
count = temp #2
for i in range(1, len(word)):
count += abs(temp - keyboard.find(word[i]))
temp = keyboard.find(word[i])
return count
| [
"saluja.harkirat@gmail.com"
] | saluja.harkirat@gmail.com |
26ed431dfbd9705c0e56ad327d292204542f140e | 10008e34625fcc803e814671bfbbe4173cc16540 | /jigna/vue_template.py | 10569332e899bd88d4c5dad6015c51ae5c99fb99 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/jigna | a375a17806c9b414b6663099f4d7b620ec95c97f | 00192450515099d8abc74866ffe1e4f743fe2279 | refs/heads/master | 2023-03-27T04:52:45.490089 | 2019-07-22T21:06:31 | 2019-07-22T21:06:31 | 11,196,735 | 17 | 14 | NOASSERTION | 2022-09-19T18:31:34 | 2013-07-05T09:58:52 | Python | UTF-8 | Python | false | false | 1,375 | py | #
# Enthought product code
#
# (C) Copyright 2013-2016 Enthought, Inc., Austin, TX
# All right reserved.
#
from textwrap import dedent
# Local imports
from .template import Template
class VueTemplate(Template):
"""A template for vue.js templates. Note that this assumes that the
ViewModel is attached to the body and all the jigna models are exposed.
"""
def _html_template_default(self):
return dedent("""
<html>
<head>
<script type="text/javascript" src="/jigna/jigna-vue.js"></script>
{head_html}
</head>
<body>
{body_html}
<script type="text/javascript">
var vm = undefined;
// jigna.models are ready only when the deferred returned by initialize
// is resolved. One could also use jigna.ready.done.
jigna.initialize({{async: {async}}}).done(function() {{
vm = new Vue({{
el: 'body',
data: jigna.models,
methods: {{
threaded: function(obj, method_name, args) {{
jigna.threaded.apply(jigna, arguments);
}},
}}
}});
}});
</script>
</body>
</html>
""")
| [
"prabhu@aero.iitb.ac.in"
] | prabhu@aero.iitb.ac.in |
c0c8b98c001970ed70da3633b5681e7f725eb608 | 09912a852e0e20d6a475ef904724f80072a68359 | /eds/FrontEnd/server/openmtc-ngsi/src/openmtc_ngsi/tests/ngsi_base.py | c1f34d0163fcba44baaf7dd08d6a4a6e4f4989bd | [
"Apache-2.0"
] | permissive | elastest/elastest-device-emulator-service | 034aa19438383df0975bf86d49e231342d63002f | f512355c5fde6bf027d23558e256b96e2296e0f2 | refs/heads/master | 2021-03-09T15:13:30.676138 | 2020-01-13T12:02:02 | 2020-01-13T12:02:02 | 91,440,225 | 3 | 9 | Apache-2.0 | 2018-12-03T14:59:27 | 2017-05-16T09:26:10 | Python | UTF-8 | Python | false | false | 6,513 | py | import unittest
from openmtc.exc import SCLConflict
from openmtc_ngsi.ngsi import NGSI_9
from openmtc_ngsi.exc import NGSIError
from openmtc_ngsi.tests.xml import registerContextRequestXML
from openmtc_ngsi.requests import RegisterContextRequest
from openmtc_ngsi.ngsi_json import NGSIJSONWriter
from futile.StringIO import StringIO
from futile.logging import LoggerMixin
from subprocess import Popen
from os.path import expanduser
from time import sleep
from futile.net.http.client.RestClient import RestClient
from threading import Thread
from futile import NOT_SET
from futile.threading import Event
from openmtc_ngsi.xml import RequestParser, NGSIXMLWriter
from tempfile import mkdtemp
from shutil import rmtree
from os.path import dirname, abspath
SCL_PATH = dirname(abspath(__file__)) + "/../../../../gscl"
class TimeoutExpired(Exception):
pass
class NotificationHandler(LoggerMixin):
data = NOT_SET
def __init__(self, num = 9, count = 1):
self.event = Event()
self.num = num
self.count = count
def __call__(self, num, data):
if int(num) == self.num:
self.count -= 1
if self.count == 0:
self.data = data
self.event.set()
def wait(self, timeout = 10):
self.logger.debug("Waiting for notification")
if not self.event.wait(timeout):
self.logger.error("Timeout waiting for notification")
raise TimeoutExpired("Timeout waiting for notification")
self.logger.debug("Notification arrived.")
class TestCaseBase(LoggerMixin, unittest.TestCase):
    """Base test case that boots a private mongod and the GSCL process per test.

    ``setUp`` creates a throwaway db directory, starts mongod on a
    non-default port, then starts the SCL; ``tearDown`` reverses all of it.
    Partial failures during setUp roll back what was already started.
    """
    scl_process = None
    mongo_process = None
    tempdir = None

    def setUp(self):
        super(TestCaseBase, self).setUp()
        self.tempdir = mkdtemp()
        try:
            self.mongo_process = self._start(["mongod", "--smallfiles",
                "--dbpath=" + self.tempdir, "--bind_ip=127.0.0.1",
                "--port=37017", "--quiet", "--nojournal"])
            try:
                self.scl_process = self._start([ expanduser(SCL_PATH), "--dbport=37017" ])
            except:
                # SCL failed to come up: do not leak the mongod child.
                self._kill(self.mongo_process)
                self.mongo_process = None
                raise
        except:
            # Nothing (or only mongod) started: remove the temp db dir.
            rmtree(self.tempdir)
            self.tempdir = None
            raise

    def _start(self, cmd):
        """Spawn *cmd* and verify it is still alive after a 5s grace period."""
        process = Popen(cmd)
        sleep(5)
        rv = process.poll()
        if rv is not None:
            raise Exception("%s died with exit code %d." % (cmd, rv, ))
        return process

    def _kill(self, process):
        """Terminate and reap *process* (no-op for None); log any failure."""
        if process:
            try:
                process.terminate()
                process.wait()
            except:
                # BUGFIX: was 'self.logger.excetion(...)' which raised
                # AttributeError inside the except block; the logging API
                # method is Logger.exception().
                self.logger.exception("Failed to terminate child process.")

    def tearDown(self):
        self._kill(self.scl_process)
        self._kill(self.mongo_process)
        self.scl_process = None
        self.mongo_process = None
        if self.tempdir:
            rmtree(self.tempdir)
            self.tempdir = None
class HTTPTestCaseBase(TestCaseBase):
    """Test base that additionally runs the NGSI WSGI app (flask) in a thread
    and talks to it over HTTP with XML payloads.
    """
    # Base URI the in-process flask server listens on.
    ngsi_uri = "http://localhost:5050"
    # Shared (class-level) XML request parser/writer.
    parser = RequestParser()
    writer = NGSIXMLWriter()
    def setUp(self):
        super(HTTPTestCaseBase, self).setUp()
        self._start_flask()
        self.client = RestClient(self.ngsi_uri, content_type = "application/xml")
    def _start_flask(self):
        # Run the WSGI app in a daemon-less background thread; the sleep gives
        # the server time to bind before the first request.
        self.flask_thread = Thread(target = self._run_flask)
        self.flask_thread.start()
        sleep(1)
    def _run_flask(self):
        # Import inside the thread so the module's server state is created here.
        import openmtc_ngsi.wsgi_flask
        openmtc_ngsi.wsgi_flask.reset()
        openmtc_ngsi.wsgi_flask.main()
    def tearDown(self):
        try:
            if self.flask_thread:
                import openmtc_ngsi.wsgi_flask
                # Shut the HTTP server down and join the serving thread.
                openmtc_ngsi.wsgi_flask._server.shutdown()
                openmtc_ngsi.wsgi_flask._server.server_close()
                self.flask_thread.join(3)
                if self.flask_thread.isAlive():
                    raise Exception("Failed to stop flask")
                # Drop any notification hook a test may have installed.
                openmtc_ngsi.wsgi_flask._after_notify_hook = None
        finally:
            super(HTTPTestCaseBase, self).tearDown()
    def _send_request(self, path, data, name = None):
        """POST *data* (object or raw XML string) to name+path, parse the reply."""
        if not isinstance(data, basestring):
            data = self.writer.serialize(data)
        # NOTE(review): self.ngsi_name is not defined in this class; subclasses
        # are presumably expected to set it — confirm before relying on it.
        name = name or self.ngsi_name
        path = name + path
        with self.client.post(path, data) as response:
            return self.parser.parse_request(response)
    def _register(self):
        """Send the canned registerContext request to the NGSI-9 endpoint."""
        return self._send_request("/registerContext", registerContextRequestXML, "/NGSI9")
    def _install_notification_handler(self, count = 1, handler = None):
        """Install (and return) a NotificationHandler as the flask notify hook."""
        import openmtc_ngsi.wsgi_flask
        handler = handler or NotificationHandler(num = self.num, count = count)
        openmtc_ngsi.wsgi_flask._after_notify_hook = handler
        return handler
class NGSITestCaseBase(TestCaseBase):
    """Test base that exercises the NGSI-9 API in-process (no HTTP layer).

    Tracks every resource created via ``_register`` so tearDown can delete
    them from the SCL again.
    """
    def __init__(self, *args, **kw):
        super(NGSITestCaseBase, self).__init__(*args, **kw)
        # Paths of resources created during the test, cleaned up in tearDown.
        self._created = []
    def setUp(self):
        super(NGSITestCaseBase, self).setUp()
        #self.scl_process = Popen(expanduser(SCL_PATH))
        #self.scl_process.wait(10)
        #raise Exception(self.scl_process.returncode)
        from openmtc_ngsi.xml import RequestParser
        self.parser = RequestParser()
        self.ngsi9 = NGSI_9()
        self.json_serializer = NGSIJSONWriter()
    def tearDown(self):
        # self.scl_process.send_signal(SIGTERM)
        # self.scl_process.wait(15)
        # Remove everything the test registered before stopping the backend.
        for path in self._created:
            self.ngsi9.scl.delete(path)
        super(NGSITestCaseBase, self).tearDown()
    def _register(self):
        """Register the canned context and remember its path for cleanup."""
        request = self.parser.parse_request(StringIO(registerContextRequestXML), RegisterContextRequest)
        response = self.ngsi9.registerContext(request)
        self._created.append(response._m2m_path)
        return response
    def _safe_register(self):
        """Like _register, but tolerate an already-registered (conflict) state."""
        try:
            self._register()
        except NGSIError as e:
            if not isinstance(e[0], SCLConflict):
                raise
    def _log_message_element(self, me):
        """Log *me* as pretty-printed NGSI JSON for debugging."""
        json = self.json_serializer.serialize(me, True)
        self.logger.info("%s: %s", type(me).__name__, json)
| [
"sro"
] | sro |
51b9bca07fa1fb160b51340cbc712c779b7beff7 | 07622a0fb38e843ab0eef4f69bb8fb25d107c06d | /pretrained_mol_sim/Theano-master/theano/gof/tests/test_link.py | 1d56ac8bd6e9f69dda6177d90df70ed873910b3f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | andreeadeac22/graph_coattention | fa59d77252625e4bee1cb9670e4a0fd0fec98135 | 23781fedaa942ca5614054f965cb7b6543e533fa | refs/heads/master | 2023-08-08T01:51:51.368457 | 2020-02-19T04:56:59 | 2020-02-19T04:56:59 | 207,414,336 | 15 | 4 | MIT | 2023-07-22T15:47:39 | 2019-09-09T22:13:34 | Python | UTF-8 | Python | false | false | 6,263 | py | from __future__ import absolute_import, print_function, division
from copy import deepcopy
import unittest
import numpy
import theano
from theano.gof import graph
from theano.gof.graph import Variable, Apply, Constant
from theano.gof.type import Type
from theano.gof.op import Op
from theano.gof import fg
from theano.gof.link import * # noqa
from theano.compat import cmp
def as_variable(x):
    """Assert that *x* is a theano Variable and return it unchanged."""
    assert isinstance(x, Variable)
    return x
class TDouble(Type):
    """Minimal Theano Type for these tests: stored data is coerced to float."""
    def filter(self, data):
        # float() raises for non-numeric data, which is the desired validation.
        return float(data)
tdouble = TDouble()
def double(name):
    """Create a fresh, unowned Variable of type tdouble with the given name."""
    return Variable(tdouble, None, None, name=name)
class MyOp(Op):
    """Tiny n-ary Op over tdouble used to build test graphs.

    ``impl`` is the Python callable evaluated in ``perform``; instances below
    (add/sub/mul/div) wire in the corresponding arithmetic.
    """
    __props__ = ("nin", "name", "impl")
    def __init__(self, nin, name, impl=None):
        self.nin = nin
        self.name = name
        if impl:
            self.impl = impl
    def make_node(self, *inputs):
        # Validate arity and input types, then produce a single tdouble output.
        assert len(inputs) == self.nin
        inputs = [as_variable(i) for i in inputs]
        for input in inputs:
            if input.type is not tdouble:
                raise Exception("Error 1")
        outputs = [double(self.name + "_R")]
        return Apply(self, inputs, outputs)
    def __str__(self):
        return self.name
    def perform(self, node, inputs, out_):
        # Evaluate impl on the runtime values and store into the output cell.
        out, = out_
        out[0] = self.impl(*inputs)
add = MyOp(2, 'Add', lambda x, y: x + y)
sub = MyOp(2, 'Sub', lambda x, y: x - y)
mul = MyOp(2, 'Mul', lambda x, y: x * y)
div = MyOp(2, 'Div', lambda x, y: x / y)
def notimpl(self, x):
    """Deliberately unimplemented Op body; backs the raise_err test op."""
    raise NotImplementedError()
raise_err = MyOp(1, 'RaiseErr', notimpl)
def inputs():
    """Return three fresh tdouble Variables named x, y and z."""
    x = double('x')
    y = double('y')
    z = double('z')
    return x, y, z
def perform_linker(fgraph):
    """Return a PerformLinker bound to *fgraph*."""
    lnk = PerformLinker().accept(fgraph)
    return lnk
def FunctionGraph(inputs, outputs):
    """Thin wrapper around gof.fg.FunctionGraph for these tests."""
    e = fg.FunctionGraph(inputs, outputs)
    return e
class TestPerformLinker(unittest.TestCase):
    """Exercises PerformLinker via thunks and compiled functions."""
    def test_thunk(self):
        # (x + y) * (x / y) evaluated through the raw thunk interface.
        x, y, z = inputs()
        e = mul(add(x, y), div(x, y))
        fn, i, o = perform_linker(FunctionGraph([x, y, z], [e])).make_thunk()
        i[0].data = 1
        i[1].data = 2
        fn()
        assert o[0].data == 1.5
    def test_function(self):
        # Same graph through make_function; z is an unused input.
        x, y, z = inputs()
        e = mul(add(x, y), div(x, y))
        fn = perform_linker(FunctionGraph([x, y, z], [e])).make_function()
        assert fn(1.0, 2.0, 3.0) == 1.5
    def test_constant(self):
        # y is folded in as a Constant, so the function takes only x.
        x, y, z = inputs()
        y = Constant(tdouble, 2.0)
        e = mul(add(x, y), div(x, y))
        fn = perform_linker(FunctionGraph([x], [e])).make_function()
        assert fn(1.0) == 1.5
    def test_input_output_same(self):
        # Identity graph must hand back the very same object.
        x, y, z = inputs()
        fn = perform_linker(FunctionGraph([x], [x])).make_function()
        assert 1.0 is fn(1.0)
    def test_input_dependency0(self):
        # 'a' is supplied as an input, so add(x, y) is not recomputed.
        x, y, z = inputs()
        a, d = add(x, y), div(x, y)
        e = mul(a, d)
        fn = perform_linker(FunctionGraph(*graph.clone([x, y, a],
                                                       [e]))).make_function()
        assert fn(1.0, 2.0, 9.0) == 4.5
    def test_skiphole(self):
        # 'r' is an input, so the unimplemented raise_err op is never run.
        x, y, z = inputs()
        a = add(x, y)
        r = raise_err(a)
        e = add(r, a)
        fn = perform_linker(FunctionGraph(*graph.clone([x, y, r],
                                                       [e]))).make_function()
        assert fn(1.0, 2.0, 4.5) == 7.5
def wrap_linker(fgraph, linkers, wrapper):
    """Return a WrapLinker over *linkers* with *wrapper*, bound to *fgraph*."""
    lnk = WrapLinker(linkers, wrapper).accept(fgraph)
    return lnk
class TestWrapLinker(unittest.TestCase):
    """Checks that WrapLinker calls the wrapper per node, in execution order."""
    def test_0(self):
        # Wrapper records ops but never calls the thunk: nothing is computed.
        nodes = []
        def wrap(i, node, th):
            nodes.append(node.op)
        x, y, z = inputs()
        e = mul(add(x, y), div(x, y))
        fn, i, o = wrap_linker(
            FunctionGraph([x, y, z], [e]),
            [PerformLinker(allow_gc=False)], wrap).make_thunk()
        i[0].data = 1
        i[1].data = 2
        fn()
        assert nodes == [div, add, mul]
        assert o[0].data is None
    def test_1(self):
        # Wrapper records ops AND runs each thunk: the output is computed.
        nodes = []
        def wrap(i, node, th):
            nodes.append(node.op)
            th()
        x, y, z = inputs()
        e = mul(add(x, y), div(x, y))
        fn, i, o = wrap_linker(
            FunctionGraph([x, y, z], [e]),
            [PerformLinker(allow_gc=False)], wrap).make_thunk()
        i[0].data = 1
        i[1].data = 2
        fn()
        assert nodes == [div, add, mul]
        assert o[0].data == 1.5
def test_sort_schedule_fn():
    """Nodes not constrained by data dependencies are ordered by the key fn."""
    import theano
    from theano.gof.sched import sort_schedule_fn, make_depends
    x = theano.tensor.matrix('x')
    y = theano.tensor.dot(x[:5] * 2, x.T + 1).T
    def str_cmp(a, b):
        return cmp(str(a), str(b))  # lexicographical sort
    linker = theano.OpWiseCLinker(schedule=sort_schedule_fn(str_cmp))
    mode = theano.Mode(linker=linker)
    f = theano.function((x,), (y,), mode=mode)
    nodes = f.maker.linker.make_all()[-1]
    depends = make_depends()
    # Adjacent nodes without a dependency edge must be in string order.
    for a, b in zip(nodes[:-1], nodes[1:]):
        if not depends((b, a)):
            assert str(a) < str(b)
def test_container_deepcopy():
    """
    Regression test working around a NumPy bug: Container storage must stay
    an ndarray with the right dtype, before and after deepcopy.
    """
    t = theano.tensor.scalar()
    # It seems that numpy.asarray(0.).astype(floatX) can return a numpy
    # scalar with some NumPy versions. So we call numpy.asarray with
    # the dtype parameter.
    v = numpy.asarray(0., dtype=theano.config.floatX)
    assert isinstance(v, numpy.ndarray), type(v)
    for readonly in [True, False]:
        c = Container(t, [v], readonly=readonly)
        assert isinstance(c.storage[0], numpy.ndarray), (c.storage[0],
                                                         type(c.storage[0]))
        assert c.storage[0].dtype == v.dtype, (c.storage[0].dtype, v.dtype)
        assert c.storage[0].dtype == c.type.dtype, (c.storage[0].dtype,
                                                    c.type.dtype)
        d = deepcopy(c)
        assert isinstance(d.storage[0], numpy.ndarray), (d.storage[0],
                                                         type(d.storage[0]))
        assert d.storage[0].dtype == v.dtype, (d.storage[0].dtype, v.dtype)
        assert d.storage[0].dtype == c.type.dtype, (d.storage[0].dtype,
                                                    c.type.dtype)
| [
"andreeadeac22@gmail.com"
] | andreeadeac22@gmail.com |
92a0083f4b08b19f5b654fa7d95e7f338f8c2fb2 | 11812a0cc7b818292e601ecdd4aa4c4e03d131c5 | /05_mysql数据库/mysql_11_python操作sql.py | b9c61a25fa7fb31800ebc2dccae4649513085046 | [] | no_license | SunshineFaxixi/Python_Learning | f1e55adcfa898489cc9146ccfb220f0b48a31a22 | ab3ca44d013311b6de02124091acc4c36a83c4d9 | refs/heads/master | 2021-08-16T05:47:29.963118 | 2021-01-04T13:48:30 | 2021-01-04T13:48:30 | 238,857,341 | 1 | 0 | null | 2020-03-03T13:53:08 | 2020-02-07T06:21:46 | HTML | UTF-8 | Python | false | false | 705 | py | #coding:utf-8
from pymysql import *
def main():
    """Connect to the local 'jingdong' MySQL database and print all goods rows."""
    # Create the Connection.
    conn = connect(host='localhost', port=3306, user='root', password='mysql', database='jingdong', charset='utf8')
    # Obtain a Cursor object.
    cs1 = conn.cursor()
    # Execute the select; returns the number of affected/matched rows.
    count = cs1.execute('select * from goods;')
    # Print the row count.
    print(count)
    for i in range(count):
        # Fetch the next result row.
        result = cs1.fetchone() # one row at a time
        print(result)
    # cs1.fetchmany(m) # fetch at most m rows
    # cs1.fetchall() # fetch all remaining rows
    cs1.close()
    conn.close()
if __name__ == "__main__":
main()
| [
"xxhan2018@163.com"
] | xxhan2018@163.com |
532bcf8ae0ee40dc3eb4bd7170acfcb5d21cc4b9 | 1f2ade8c03a076ca774f6a535082fd2d3a468f96 | /txchat/txchat/client.py | 1ca81dc2af9c7e598c35066f4a5466ea6de2c38f | [] | no_license | kneufeld/chatroom | a48668e0b1210004d0803afd637cc877a610555c | 3b3c46abdc525309d3a39f04041748dddbefb54b | refs/heads/master | 2021-01-19T00:15:59.558234 | 2015-03-28T14:16:22 | 2015-03-28T14:16:22 | 32,901,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | from __future__ import print_function
import os
from twisted.internet.task import react
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.protocol import Factory
from twisted.internet.protocol import Protocol
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
from twisted.protocols.basic import LineReceiver
import msgpack
class ChatClient(Protocol):
    """Twisted protocol for the chat client: msgpack-framed [nick, msg] pairs."""
    def __init__(self, done):
        # done: Deferred fired when the connection closes.
        self.done = done
        # Streaming unpacker: buffers partial/burst TCP reads between calls.
        self.unpacker = msgpack.Unpacker()
    def connectionLost(self, reason):
        print(reason.getErrorMessage())
        self.done.callback(reason)
    def sendMessage(self, nick, msg):
        """Serialize and send one [nick, msg] message to the server."""
        print("sending", nick, msg)
        data = msgpack.packb([nick, msg])
        self.transport.write(data)
    def dataReceived(self, data):
        # Feed raw bytes into the streaming unpacker; iterate whatever
        # complete messages are available (handles coalesced "burst" reads,
        # unlike a one-shot msgpack.unpack(data)).
        self.unpacker.feed(data)
        for msg in self.unpacker:
            print("{}: {}".format(*msg))
class StdIOFactory(Factory):
    """Factory producing a StandardInput that forwards stdin lines via *proto*."""
    def __init__(self, nick, proto):
        self.nick = nick
        self.proto = proto
    def buildProtocol(self, addr):
        return StandardInput(self.nick, self.proto)
from twisted.internet.stdio import StandardIO
class StandardInput(LineReceiver, StandardIO):
    '''
    Reads stdin and sends every line as a chat message to the server via the
    wrapped ChatClient protocol. No line editing; a simple pipe.
    '''
    # Use the platform's native line ending for splitting stdin.
    delimiter = os.linesep
    def lineReceived(self, line):
        return self.protocol.sendMessage(self.nick, line)
    def __init__(self, nick, proto):
        # NOTE(review): neither LineReceiver.__init__ nor StandardIO.__init__
        # is invoked here — confirm that is intentional for this usage.
        self.nick = nick
        self.protocol = proto
    def connectionLost(self, reason):
        # Stdin closed: drop the server connection too.
        self.protocol.transport.loseConnection()
| [
"meejah@meejah.ca"
] | meejah@meejah.ca |
267652c39768dc94e7405da3455bcd2af9d2d01d | 2e6c379a22e87ad15f6d9c0356e615f42609e0eb | /Hackerrank/WeekOfCode/WoC28/TheValueOfFriendship.py | 43ce3207290cb8ed36d4c9c1081da448586bf012 | [] | no_license | opethe1st/CompetitiveProgramming | 49f24b1b0c6bf737c5698a15edfdf5009a308a52 | 84ab62144f6b389ef74b7e8956b7e02e0f2ab108 | refs/heads/master | 2021-01-13T10:35:08.339291 | 2020-09-14T21:23:34 | 2020-09-14T21:23:34 | 69,969,077 | 7 | 2 | null | 2019-02-17T18:36:34 | 2016-10-04T13:46:21 | Python | UTF-8 | Python | false | false | 1,573 | py | import time
countnodes=0
def dfs(node):
    """Iteratively traverse the connected component containing *node*.

    Reads adjacency lists from the module-global ``G``, marks nodes in the
    module-global ``visited`` and increments the global ``countnodes`` once
    per newly visited node (used as the component-size accumulator).
    """
    global G
    global countnodes
    stack = [node]
    while stack:
        vertex = stack.pop()
        if not visited[vertex]:
            visited[vertex]=True
            countnodes+=1
            for v in G[vertex]:
                stack.append(v)
start = time.time()
def valuefriendship(n):
    """Total friendship value of a fully-chained component of size n.

    Equals sum over k=1..n-1 of k^2 plus sum over k=1..n-1 of k, i.e.
    n*(2n-1)*(n-1)/6 + n*(n-1)/2.
    """
    squares = n * (2 * n - 1) * (n - 1) / 6
    pairs = n * (n - 1) / 2
    return squares + pairs
# Driver (Python 2): for each test case, read the friendship graph, find its
# connected components, then maximize total friendship value by growing the
# smallest components first and spending leftover edges on the largest ones.
t = int(raw_input().strip())
for a0 in xrange(t):
    n,m = raw_input().strip().split(' ')
    n,m = [int(n),int(m)]
    G = [[] for i in xrange(n)]
    for a1 in xrange(m):
        x,y = raw_input().strip().split(' ')
        x,y = [int(x),int(y)]
        G[x-1].append(y-1)
        G[y-1].append(x-1)
    visited=[False]*(n+1)
    #count the number of clusters
    total = 0
    ncomponents = 0
    components = []
    # Flood-fill each unvisited node; dfs accumulates size in countnodes.
    for node in xrange(n):
        if not visited[node]:
            ncomponents+=1
            countnodes = 0
            dfs(node)
            components.append(countnodes)
    components.sort()
    #print components,ncomponents
    # Base value: each component built as a chain of size-1 merges.
    for i in xrange(ncomponents):
        total+=valuefriendship(components[i])
    #print total
    #print total
    # prefixs[i] = total edges used by the first i (smaller) components.
    prefixs = [0]*(len(components)+1)
    for i in xrange(len(components)):
        prefixs[i+1]=prefixs[i]+components[i]-1
    #print total,prefixs
    # Edges of earlier components are counted again at each later component's
    # final value (processing order: small components first).
    for i in xrange(len(components)):
        #print prefixs[i],(components[i]-1)*(components[i])
        total+=prefixs[i]*(components[i]-1)*(components[i])
    # Redundant edges (beyond spanning trees) each add the full final value.
    total+=(m-n+ncomponents)*sum([v*(v-1) for v in components])
    print total
print time.time()-start
print time.time()-start | [
"ogunks@live.com"
] | ogunks@live.com |
b2c41e0470d3181e98c0afa54ddfe1b17e35d7ff | 8e67d8618b9be7c777597b650876fa20082a6ebb | /13458.py | a3123478b53535d2918cc1bd0af2ce75faf80ce4 | [] | no_license | ljm9748/practice_codingtest | c5a2cc315c1ccd8f48a9424d13d2097c9fed0efc | 367710238976c1a2f8b42bfc3fc2936c47b195c5 | refs/heads/master | 2023-01-14T12:29:32.530648 | 2020-11-18T17:49:50 | 2020-11-18T17:49:50 | 282,162,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | roomnum=int(input())
people=input().split()
main, sub= input().split()
main=int(main)
sub=int(sub)
ans=0
for peop in people:
#print(ans)
ans += 1
if int(peop)<=main:
continue
else:
peop=int(peop)-main
if peop%sub != 0:
ans += 1
ans += peop//sub
print(ans) | [
"ljm9748@naver.com"
] | ljm9748@naver.com |
73de5d483b19502d99934dc40b1b21fc3d6c082a | c340835e4444c664fc2b261238e3738cf5bf7196 | /climbing_stairs.py | 98366235b73f864768577ea21c1769c0b7cb60f0 | [] | no_license | z-o-e/LeetCode_OJ_Python | 49f2a7378eb98f707c97d7757cc19ef19622db42 | ad7f5152fe404bdd4e91710d9a719f392bec7a96 | refs/heads/master | 2021-03-12T22:39:44.898502 | 2014-10-16T05:30:25 | 2014-10-16T05:30:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | class Solution:
# @param n, an integer
# @return an integer
def climbStairs(self,n):
prev = 0
cur = 1
for i in range(n):
tmp = cur
cur = cur+prev
prev = tmp
return cur
| [
"hyzhang22@gmail.com"
] | hyzhang22@gmail.com |
dbc8d7557f6dfcfbf5a27d1443b30db0fc155508 | 5cb924f15ae0f6d44c5b4a521ebf72c4617936c2 | /py/plugins/portscan.py | f7b4e776c8ca9b4644c0596ea32d640da90d8421 | [] | no_license | T0gether-zzz/w8scan | 1e0b8a901d98e5e774817081acacf89bda0a0fd9 | 14c403255f491b8d998ecc74041b5ccdb7fb2b47 | refs/heads/master | 2022-12-21T05:08:51.684366 | 2022-12-09T02:59:22 | 2022-12-09T02:59:22 | 221,867,910 | 0 | 0 | null | 2019-11-15T07:25:37 | 2019-11-15T07:25:34 | null | UTF-8 | Python | false | false | 946 | py | # 端口扫描插件
import socket,time,thread
import urlparse
class PortScan:
    """Python 2 port-scanner plugin: probes TCP ports 0-65529 of *ip*.

    Spawns one thread per port via the legacy ``thread`` module and reports
    open ports through the plugin-provided global ``report`` object
    (injected by the host scanner — not defined in this file).
    """
    def __init__(self,ip):
        # 3s connect timeout applies to every socket created afterwards.
        socket.setdefaulttimeout(3)
        self.lock = thread.allocate_lock()
        self.ip = ip
        try:
            for i in range(0, 65530):
                thread.start_new_thread(self.socket_port, (ip, int(i)))
        except:
            pass
        # Give the probe threads a moment to finish before returning.
        time.sleep(4)
    def socket_port(self,ip, port):
        """Probe a single TCP port; report it when the connect succeeds."""
        try:
            if port >= 65535:
                print u'port end'
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # connect_ex returns 0 on success instead of raising.
            result = s.connect_ex((ip, port))
            if result == 0:
                # Serialize console/report output across probe threads.
                self.lock.acquire()
                print ip, ':', port, 'open'
                report.add_list("端口",port)
                self.lock.release()
            s.close()
        except:
            pass
pass
ip = gethostbyname(_U)
print "Start scan port -> IP:",ip
PortScan(ip)
report.send() | [
"34109680@qq.com"
] | 34109680@qq.com |
e850915077752409ad3bca8b330fb86f06e9bb35 | fb65b7c000642dca68c93ee85a87795b3f30fe21 | /Advance_Python/Quantifiers/Rule7.py | b766e7eae0c3cbae746a9b99174676e917c49ba9 | [] | no_license | toncysara17/luminarpythonprograms | f41b446251feba641e117d87ce235dc556086f8f | 17bc37c3f83c0e9792aaa8bccd901371a6413f14 | refs/heads/master | 2023-04-17T18:51:31.493118 | 2021-04-20T05:25:02 | 2021-04-20T05:25:02 | 358,550,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | #Quantifiers
import re
x = "a$" #pattern: match an 'a' at the end of the string (the $ anchor)
r="aaa abc aaaa cga"
# finditer yields one match object per (non-overlapping) occurrence.
matcher=re.finditer(x,r)
for match in matcher:
    print(match.start())
    print(match.group())
"toncysara12@gmail.com"
] | toncysara12@gmail.com |
455687993066193da245230356ec83ca3ed75ffa | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/DeepSpeed/csrc/aio/py_test/ds_aio_basic.py | cf70b6655e9c1366371d24a6fb33808c41729e93 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 4,262 | py | """
Copyright 2020 The Microsoft DeepSpeed Team
Licensed under the MIT license.
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import torch
import os
import time
from deepspeed.ops.aio import AsyncIOBuilder
from multiprocessing import Pool, Barrier
from test_ds_aio_utils import report_results, task_log, task_barrier
def pre_basic(args, tid, read_op):
    """Per-task setup: allocate a pinned CPU byte tensor sized to the transfer.

    For reads the size comes from the existing input file; for writes it is
    args.write_size and each task writes to its own '<write_file>.<tid>'.
    Returns the task context dict consumed by the main/post phases.
    """
    io_string = "Read" if read_op else "Write"
    num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size
    file = args.read_file if read_op else f'{args.write_file}.{tid}'
    task_log(tid, f'Allocate tensor of size {num_bytes} bytes')
    # Pinned (page-locked) memory is required for async I/O transfers.
    buffer = torch.empty(num_bytes, dtype=torch.uint8, device='cpu').pin_memory()
    task_log(
        tid,
        f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}'
    )
    ctxt = {}
    ctxt['file'] = file
    ctxt['num_bytes'] = num_bytes
    ctxt['buffer'] = buffer
    ctxt['elapsed_sec'] = 0
    return ctxt
def pre_basic_read(pool_params):
    """Pool adapter: run pre_basic in read mode for (args, tid)."""
    args, tid = pool_params
    ctxt = pre_basic(args, tid, True)
    return ctxt
def pre_basic_write(pool_params):
    """Pool adapter: run pre_basic in write mode for (args, tid)."""
    args, tid = pool_params
    ctxt = pre_basic(args, tid, False)
    return ctxt
def post_basic(pool_params):
    """Per-task cleanup: release the pinned buffer held in the task context.

    *pool_params* is (args, tid, ctxt); only the context is used. The tensor
    is detached and its reference dropped so it can be reclaimed.
    """
    ctxt = pool_params[2]
    ctxt["buffer"].detach()
    ctxt["buffer"] = None
    return ctxt
def main_basic_read(pool_params):
    """Timed main phase: async-read the task's file into its pinned buffer.

    Accumulates wall time into ctxt['elapsed_sec'] so repeated loops sum up.
    """
    args, tid, ctxt = pool_params
    start_time = time.time()
    AsyncIOBuilder().load().aio_read(ctxt['buffer'],
                                     ctxt['file'],
                                     args.block_size,
                                     args.queue_depth,
                                     args.single_submit,
                                     args.overlap_events,
                                     args.validate)
    end_time = time.time()
    ctxt['elapsed_sec'] += end_time - start_time
    return ctxt
def main_basic_write(pool_params):
    """Timed main phase: async-write the task's pinned buffer to its file.

    Accumulates wall time into ctxt['elapsed_sec'] so repeated loops sum up.
    """
    args, tid, ctxt = pool_params
    start_time = time.time()
    AsyncIOBuilder().load().aio_write(ctxt['buffer'],
                                      ctxt['file'],
                                      args.block_size,
                                      args.queue_depth,
                                      args.single_submit,
                                      args.overlap_events,
                                      args.validate)
    end_time = time.time()
    ctxt['elapsed_sec'] += end_time - start_time
    return ctxt
def get_schedule(args, read_op):
    """Map the requested direction to its pre/main/post phase callables.

    *args* is accepted for interface symmetry with the phase functions but
    is not consulted here.
    """
    if read_op:
        pre_phase, main_phase = pre_basic_read, main_basic_read
    else:
        pre_phase, main_phase = pre_basic_write, main_basic_write
    return {'pre': pre_phase, 'main': main_phase, 'post': post_basic}
def _aio_handle_tasklet(pool_params):
    """Worker entry point: run one task's pre/main/post schedule in lockstep.

    Barriers (aio_barrier, installed by _init_tasklet) keep all pool workers
    phase-aligned so timing measurements are comparable. Returns
    (main-phase seconds, aio-call seconds, total bytes transferred).
    """
    args, tid, read_op = pool_params
    # Create schedule
    schedule = get_schedule(args, read_op)
    task_log(tid, f'schedule = {schedule}')
    task_barrier(aio_barrier, args.threads)
    # Run pre task
    task_log(tid, f'running pre-task')
    ctxt = schedule["pre"]((args, tid))
    task_barrier(aio_barrier, args.threads)
    # Run main tasks in a loop
    ctxt["main_task_sec"] = 0
    for i in range(args.loops):
        task_log(tid, f'running main task {i}')
        start_time = time.time()
        ctxt = schedule["main"]((args, tid, ctxt))
        task_barrier(aio_barrier, args.threads)
        stop_time = time.time()
        ctxt["main_task_sec"] += stop_time - start_time
    # Run post task
    task_log(tid, f'running post-task')
    ctxt = schedule["post"]((args, tid, ctxt))
    task_barrier(aio_barrier, args.threads)
    return ctxt["main_task_sec"], ctxt["elapsed_sec"], ctxt["num_bytes"] * args.loops
def _init_tasklet(b):
    """Pool initializer: expose the shared Barrier to each worker process."""
    global aio_barrier
    aio_barrier = b
def aio_basic_multiprocessing(args, read_op):
    """Fan out args.threads worker processes over the aio benchmark and report.

    Each worker gets (args, worker-index, read_op); a shared Barrier keeps
    the workers' benchmark phases synchronized.
    """
    b = Barrier(args.threads)
    pool_params = [(args, p, read_op) for p in range(args.threads)]
    with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p:
        pool_results = p.map(_aio_handle_tasklet, pool_params)
    report_results(args, read_op, pool_results)
| [
"sqy1415@gmail.com"
] | sqy1415@gmail.com |
04bad3e7e96c1f7304a49effe7f1aa4d78526132 | b84842cfa24fce5b1a8d093bdf45885b0f5ab434 | /configuration/logging/main.py | 9b861944bf3350ed922aabc8c501e78db5a6a4ee | [] | no_license | CodedQuen/Programming-Google-App-Engine | fc0f4572a60f3d91f08a15f2b2d19d71673d2de6 | bd3430b84db6477737a7332a358ed37a6ea36b23 | refs/heads/master | 2022-04-25T01:14:08.620872 | 2020-04-30T07:02:26 | 2020-04-30T07:02:26 | 260,136,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | import datetime
import logging
import sys
import webapp2
class MainPage(webapp2.RequestHandler):
    """Demo handler: emits one log record at each severity level on GET."""
    def get(self):
        logging.debug('debug level')
        logging.info('info level')
        logging.warning('warning level')
        logging.error('error level')
        logging.critical('critical level')
        # Writes to stderr surface in App Engine logs at error level.
        sys.stderr.write('stderr write, logged at the error level\n')
        self.response.write('<p>Messages logged.</p>')
        self.response.write('<p>The time is: %s</p>'
                            % str(datetime.datetime.now()))
app = webapp2.WSGIApplication([('/', MainPage)], debug=True)
| [
"noreply@github.com"
] | CodedQuen.noreply@github.com |
6df27e5c03baf10c8e272457985a7ddc038b2e7c | a08176ef0bf654635e4c7ffd83ee1cf58b7c7bcb | /yk_blog/models.py | 66b4ce07724e3766304da2c083698ef37d0fa09a | [] | no_license | yangkun6/blog | e88e5dcfd46db7cce1d6459a524c4d1094ec625c | fb296ed9afc575ef4ba3f1a38839738e351cf708 | refs/heads/master | 2020-03-07T10:08:29.669959 | 2018-03-30T12:12:22 | 2018-03-30T12:12:22 | 127,409,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # coding:utf-8
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models
class Author(models.Model): # author table
    """Blog author: contact details, optional profile photo and rich-text bio."""
    name = models.CharField(max_length=32,verbose_name='作者姓名')
    email = models.EmailField(verbose_name='作者邮箱')
    # Optional demographic / contact fields (blank allowed in forms).
    age = models.IntegerField(verbose_name='作者年龄',blank=True,null=True)
    gender = models.CharField(max_length=32,verbose_name='性别',blank=True)
    phone = models.CharField(max_length=32,verbose_name='手机',blank=True)
    address = models.CharField(max_length=32,verbose_name='地址',blank=True)
    photo = models.ImageField(upload_to='images',verbose_name='作者照片',blank=True)
    description = RichTextUploadingField(verbose_name='作者描述',blank=True)
    def __unicode__(self): # Python 2 admin/display name
        return self.name
class Article(models.Model):
    """Blog article with publish date, author FK, optional image and rich text."""
    title = models.CharField(max_length=32,verbose_name='文章标题')
    time = models.DateField(max_length=32, verbose_name='发表日期')
    # Each article belongs to exactly one Author.
    author = models.ForeignKey(Author)
    image = models.ImageField(upload_to='images',verbose_name='文章图片',blank=True)
    content= RichTextUploadingField(verbose_name='文章内容',blank=True)
    description = RichTextUploadingField(verbose_name='文章描述',blank=True)
| [
"you@example.com"
] | you@example.com |
8af9315cf579a0691f62583d1dc67022321befbe | 4dddd01ca6a60f2fa408ee55fbaebe868917184a | /myinterface/websockets/ws_client.py | 2b5945dff8ae8103a4e303b7bfc914ce59c72e7a | [] | no_license | fanpl-sourse/all_study_practice | 6f056c18f0eb7afd6af649e5b595895683bb0cbd | b02f25231855e149b95476b20dd8d53318cfe1a5 | refs/heads/master | 2023-01-22T09:13:15.107616 | 2020-11-30T05:53:43 | 2020-11-30T05:53:43 | 304,493,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
# @Time : 2020/11/21 16:49
# @Author : 饭盆里
# @File : ws_client.py
# @Software: PyCharm
# @desc :
import asyncio
import websockets
async def hello():
    """Connect to the local ws server, send a name from stdin, print the reply."""
    uri = 'ws://127.0.0.1:8211'
    async with websockets.connect(uri) as websocket:
        name = input("you are ? ")
        await websocket.send(name)
        print(f">>{name}")
        greeting = await websocket.recv()
        print(f"<<{greeting}")
asyncio.get_event_loop().run_until_complete(hello())
| [
"fanpengli@fangdd.com"
] | fanpengli@fangdd.com |
b93a7133e53b462bb76941328281ba9cd0b2d03a | 4c455970572cd1531f0df4cd1705f655c00f8356 | /beegarden/themes/default/__init__.py | b5ddd8c4bebc6502bec74dda9412e916fb71f127 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Fugachi/beegarden | 44dd8d016471cb1a3d9c12d6a7f41aad2d05cf67 | a0278a9e227b7f02c3ed5d46b41deb4da6d399a8 | refs/heads/master | 2020-05-01T23:44:40.688988 | 2016-05-30T09:46:13 | 2016-05-30T09:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # -*- coding: utf-8 -*-
import os
# Sprite images live next to this module.
PICTURES_PATH = os.path.dirname(__file__)
# Field background (RGB) and dimensions in pixels.
BACKGROUND_COLOR = (85, 107, 47)
FIELD_WIDTH = 1200
FIELD_HEIGHT = 600
# Colors of the two on-sprite status meters (RGB).
METER_1_COLOR = (0, 255, 70)
METER_2_COLOR = (232, 129, 31)
TEAMS_COUNT = 4
DEBUG = False
# Gameplay tuning: bee health, sting damage and regeneration rate.
MAX_HEALTH = 100
STING_POWER = 50
HEALTH_TOP_UP_SPEED = 0.5
BEEHIVE_SAFE_DISTANCE = 200
# See robogame_engine.constants
| [
"suguby@gmail.com"
] | suguby@gmail.com |
1cf095e0a7d126f06a9fbe8c6dcf2b1c5f89315f | 82149a84b47fb37238452a658d5f3a8f23342658 | /Launchpad_MK2/Colors.py | f7007f55023226272f5808cddfb4dd533ca8cf10 | [] | no_license | maratbakirov/AbletonLive11_MIDIRemoteScripts | 408c90522d8f54b878e595b0d8af28ad5008a4a1 | 2b25ba9351764c49f7fd1f99875e28e67b002f30 | refs/heads/master | 2023-04-12T04:44:16.715220 | 2021-05-09T11:48:24 | 2021-05-09T11:48:24 | 365,708,395 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,777 | py | #Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Launchpad_MK2/Colors.py
from __future__ import absolute_import, print_function, unicode_literals
from builtins import object
from _Framework.ButtonElement import Color
from .consts import BLINK_LED_CHANNEL, PULSE_LED_CHANNEL
class Blink(Color):
    """Color that renders as a blinking LED on the Launchpad MK2."""
    def __init__(self, midi_value = 0, *a, **k):
        super(Blink, self).__init__(midi_value, *a, **k)
    def draw(self, interface):
        # Clear first, then resend the value on the blink channel, which the
        # hardware interprets as "blink between off and this color".
        interface.send_value(0)
        interface.send_value(self.midi_value, channel=BLINK_LED_CHANNEL)
class Pulse(Color):
    """Color that renders as a pulsing (breathing) LED on the Launchpad MK2."""
    def __init__(self, midi_value = 0, *a, **k):
        super(Pulse, self).__init__(midi_value, *a, **k)
    def draw(self, interface):
        # Clear first, then resend the value on the pulse channel, which the
        # hardware interprets as "fade this color in and out".
        interface.send_value(0)
        interface.send_value(self.midi_value, channel=PULSE_LED_CHANNEL)
class Rgb(object):
    """Named palette entries from the Launchpad MK2 velocity color table.

    Plain Color entries are static; *_BLINK / *_PULSE variants use the
    special LED channels, *_HALF variants are dimmed versions.
    """
    BLACK = Color(0)
    DARK_GREY = Color(1)
    GREY = Color(2)
    WHITE = Color(3)
    RED = Color(5)
    RED_BLINK = Blink(5)
    RED_PULSE = Pulse(5)
    RED_HALF = Color(7)
    ORANGE = Color(9)
    ORANGE_HALF = Color(11)
    AMBER = Color(96)
    AMBER_HALF = Color(14)
    YELLOW = Color(13)
    YELLOW_HALF = Color(15)
    DARK_YELLOW = Color(17)
    DARK_YELLOW_HALF = Color(19)
    GREEN = Color(21)
    GREEN_BLINK = Blink(21)
    GREEN_PULSE = Pulse(21)
    GREEN_HALF = Color(27)
    MINT = Color(29)
    MINT_HALF = Color(31)
    LIGHT_BLUE = Color(37)
    LIGHT_BLUE_HALF = Color(39)
    BLUE = Color(45)
    BLUE_HALF = Color(47)
    DARK_BLUE = Color(49)
    DARK_BLUE_HALF = Color(51)
    PURPLE = Color(53)
    PURPLE_HALF = Color(55)
    DARK_ORANGE = Color(84)
DARK_ORANGE = Color(84)
LIVE_COLORS_TO_MIDI_VALUES = {10927616: 74,
16149507: 84,
4047616: 76,
6441901: 69,
14402304: 99,
8754719: 19,
16725558: 5,
3947580: 71,
10056267: 15,
8237133: 18,
12026454: 11,
12565097: 73,
13381230: 58,
12243060: 111,
16249980: 13,
13013643: 4,
10208397: 88,
695438: 65,
13821080: 110,
3101346: 46,
16749734: 107,
8962746: 102,
5538020: 79,
13684944: 117,
15064289: 119,
14183652: 94,
11442405: 44,
13408551: 100,
1090798: 78,
11096369: 127,
16753961: 96,
1769263: 87,
5480241: 64,
1698303: 90,
16773172: 97,
7491393: 126,
8940772: 80,
14837594: 10,
8912743: 16,
10060650: 105,
13872497: 14,
16753524: 108,
8092539: 70,
2319236: 39,
1716118: 47,
12349846: 59,
11481907: 121,
15029152: 57,
2490280: 25,
11119017: 112,
10701741: 81,
15597486: 8,
49071: 77,
10851765: 93,
12558270: 48,
32192: 43,
8758722: 103,
10204100: 104,
11958214: 55,
8623052: 66,
16726484: 95,
12581632: 86,
13958625: 28,
12173795: 115,
13482980: 116,
16777215: 3,
6094824: 33,
13496824: 114,
9611263: 92,
9160191: 36}
RGB_COLOR_TABLE = ((0, 0),
(1, 1973790),
(2, 8355711),
(3, 16777215),
(4, 16731212),
(5, 16711680),
(6, 5832704),
(7, 1638400),
(8, 16760172),
(9, 16733184),
(10, 5840128),
(11, 2562816),
(12, 16777036),
(13, 16776960),
(14, 5855488),
(15, 1644800),
(16, 8978252),
(17, 5570304),
(18, 1923328),
(19, 1321728),
(20, 5046092),
(21, 65280),
(22, 22784),
(23, 6400),
(24, 5046110),
(25, 65305),
(26, 22797),
(27, 6402),
(28, 5046152),
(29, 65365),
(30, 22813),
(31, 7954),
(32, 5046199),
(33, 65433),
(34, 22837),
(35, 6418),
(36, 5030911),
(37, 43519),
(38, 16722),
(39, 4121),
(40, 5015807),
(41, 22015),
(42, 7513),
(43, 2073),
(44, 5000447),
(45, 255),
(46, 89),
(47, 25),
(48, 8867071),
(49, 5505279),
(50, 1638500),
(51, 983088),
(52, 16731391),
(53, 16711935),
(54, 5832793),
(55, 1638425),
(56, 16731271),
(57, 16711764),
(58, 5832733),
(59, 2228243),
(60, 16717056),
(61, 10040576),
(62, 7950592),
(63, 4416512),
(64, 211200),
(65, 22325),
(66, 21631),
(67, 255),
(68, 17743),
(69, 2425036),
(70, 8355711),
(71, 2105376),
(72, 16711680),
(73, 12451629),
(74, 11529478),
(75, 6618889),
(76, 1084160),
(77, 65415),
(78, 43519),
(79, 11007),
(80, 4129023),
(81, 7995647),
(82, 11672189),
(83, 4202752),
(84, 16730624),
(85, 8970502),
(86, 7536405),
(87, 65280),
(88, 3931942),
(89, 5898097),
(90, 3735500),
(91, 5999359),
(92, 3232198),
(93, 8880105),
(94, 13835775),
(95, 16711773),
(96, 16744192),
(97, 12169216),
(98, 9502464),
(99, 8609031),
(100, 3746560),
(101, 1330192),
(102, 872504),
(103, 1381674),
(104, 1450074),
(105, 6896668),
(106, 11010058),
(107, 14569789),
(108, 14182940),
(109, 16769318),
(110, 10412335),
(111, 6796559),
(112, 1973808),
(113, 14483307),
(114, 8454077),
(115, 10131967),
(116, 9332479),
(117, 4210752),
(118, 7697781),
(119, 14745599),
(120, 10485760),
(121, 3473408),
(122, 1757184),
(123, 475648),
(124, 12169216),
(125, 4141312),
(126, 11755264),
(127, 4920578))
| [
"mbakirov@hotmail.com"
] | mbakirov@hotmail.com |
f326d1b94a0865f09b980a7fd082d1c64569f705 | aa4024b6a846d2f6032a9b79a89d2e29b67d0e49 | /GM2AUTOSAR_MM/transformation/faulty_from_DSLTrans/HConnVirtualDevice2Distributable1.py | 283d3616c656a6fc1b7cb1c13e3a3d535d46bbfd | [
"MIT"
] | permissive | levilucio/SyVOLT | 41311743d23fdb0b569300df464709c4954b8300 | 0f88827a653f2e9d3bb7b839a5253e74d48379dc | refs/heads/master | 2023-08-11T22:14:01.998341 | 2023-07-21T13:33:36 | 2023-07-21T13:33:36 | 36,246,850 | 3 | 2 | MIT | 2023-07-21T13:33:39 | 2015-05-25T18:15:26 | Python | UTF-8 | Python | false | false | 4,703 | py | from core.himesis import Himesis
import uuid
class HConnVirtualDevice2Distributable1(Himesis):
    def __init__(self):
        """
        Builds the himesis graph representing the DSLTrans rule
        ConnVirtualDevice2Distributable1.
        """
        # Mark this instance as compiled up front.
        self.is_compiled = True
        super(HConnVirtualDevice2Distributable1, self).__init__(
            name='HConnVirtualDevice2Distributable1', num_nodes=0, edges=[])
        # Graph-level attributes.
        self["mm__"] = ['HimesisMM']
        self["name"] = "ConnVirtualDevice2Distributable1"
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'ConnVirtualDevice2Distributable1')
        # One attribute dict per vertex, in creation order; the list position
        # is the vertex index referenced by the edge list below.
        node_specs = [
            {'mm__': 'MatchModel'},                             # 0: match model (only one supported)
            {'mm__': 'ApplyModel'},                             # 1: apply model
            {'mm__': 'paired_with',
             'attr1': 'ConnVirtualDevice2Distributable1'},      # 2: pairs the match and apply models
            {'mm__': 'PhysicalNode', 'attr1': '+'},             # 3: match class PhysicalNode()
            {'mm__': 'Partition', 'attr1': '+'},                # 4: match class Partition()
            {'mm__': 'Module', 'attr1': '+'},                   # 5: match class Module()
            {'mm__': 'CompositionType', 'attr1': '1'},          # 6: apply class CompositionType()
            {'mm__': 'ComponentPrototype', 'attr1': '1'},       # 7: apply class ComponentPrototype()
            {'attr1': 'partition', 'mm__': 'directLink_S'},     # 8: PhysicalNode --partition--> Partition
            {'attr1': 'module', 'mm__': 'directLink_S'},        # 9: Partition --module--> Module
            {'attr1': 'component', 'mm__': 'directLink_T'},     # 10: CompositionType --component--> ComponentPrototype
            {'attr1': 'type', 'mm__': 'directLink_T'},          # 11: ComponentPrototype --type--> CompositionType
            {'mm__': 'backward_link'},                          # 12: PhysicalNode <---- CompositionType
            {'mm__': 'backward_link'},                          # 13: Module <---- ComponentPrototype
        ]
        for index, attributes in enumerate(node_specs):
            self.add_node()
            for attr_name, attr_value in attributes.items():
                self.vs[index][attr_name] = attr_value
        # Edges as (source vertex, target vertex) pairs.
        self.add_edges([
            (0, 3),    # matchmodel -> match_class PhysicalNode()
            (0, 4),    # matchmodel -> match_class Partition()
            (0, 5),    # matchmodel -> match_class Module()
            (1, 6),    # applymodel -> apply_class CompositionType()
            (1, 7),    # applymodel -> apply_class ComponentPrototype()
            (3, 8),    # match_class PhysicalNode() -> association partition
            (8, 4),    # association partition -> match_class Partition()
            (4, 9),    # match_class Partition() -> association module
            (9, 5),    # association module -> match_class Module()
            (6, 10),   # apply_class CompositionType() -> association component
            (10, 7),   # association component -> apply_class ComponentPrototype()
            (7, 11),   # apply_class ComponentPrototype() -> association type
            (11, 6),   # association type -> apply_class CompositionType()
            (6, 12),   # apply_class CompositionType() -> backward_association
            (12, 3),   # backward_association -> match_class PhysicalNode()
            (7, 13),   # apply_class ComponentPrototype() -> backward_association
            (13, 5),   # backward_association -> match_class Module()
            (0, 2),    # matchmodel -> pairedwith
            (2, 1),    # pairedwith -> applyModel
        ])
        # Attribute equations for the two apply classes.
        self["equations"] = [((6, 'ApplyAttribute'), ('constant', 'solveRef')),
                             ((7, 'ApplyAttribute'), ('constant', 'solveRef'))]
| [
"bentleyjoakes@gmail.com"
] | bentleyjoakes@gmail.com |
d07b13aa2c6b8bf613b9ea8f09b1478290186bb4 | 65459c30edb8e4819c137c8456bb12a964898316 | /venv/Scripts/pip-script.py | db814aa837aa91ef055c87cd7a26f5a445fc44c7 | [] | no_license | MiscCoding/2ndOrderLanguageModel | e308a58f53631b0c56f87a2800ba912740d1d480 | 682e721c982efb841167861cd335bf126eed4d6c | refs/heads/master | 2021-01-05T03:18:51.071115 | 2020-02-16T10:02:43 | 2020-02-16T10:02:43 | 240,859,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!C:\Users\neoge_nb4nkvy\PycharmProjects\2ndOrderLanguageModel\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools console-script wrapper: resolves the 'pip'
# entry point from the pinned pip==19.0.3 distribution and executes it.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.pyw'/'.exe' suffix that Windows launchers
    # append, so argv[0] matches the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"neogeo-s@hanmail.net"
] | neogeo-s@hanmail.net |
d25f3bdabdb9ef8342ec2eb53ca8bf68dc56cceb | b47c45b8cedac82c0b669e56795bce1cc50be5f5 | /save.py | b5c00a168a076c436f1303613584d56afd52e6b3 | [] | no_license | westscz/Maxdila | 07075271521eb9f7cae9cfc6ee2974962b915846 | 8d34346345624dfa558ecb556d74c5a5455e94d3 | refs/heads/master | 2021-06-24T22:13:36.160679 | 2020-11-11T13:26:17 | 2020-11-11T13:26:17 | 154,186,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,461 | py | from pprint import pprint
class Save:
    """In-memory representation of a Maxdila save file."""

    # Human-readable morale labels, indexed by the numeric morale value read
    # from the save file.  NOTE(review): the original author observed the
    # value 12 in saves, which is out of range for this table — confirm.
    morale_values = ['?', '?', 'słabe', 'średnie', 'dobre', 'wysokie', 'bardzo wysokie']

    def get_next_int(self, open_file):
        """Read the next line of *open_file* and return it as an int."""
        return int(next(open_file))

    def get_next_bool(self, open_file):
        """Read the next line of *open_file* and return it as a bool.

        Anything other than the literal string 'FALSE' counts as True.
        Fix: the original compared with ``is not 'FALSE'`` — an identity
        check against a freshly sliced string — so it always returned True.
        """
        return next(open_file).rstrip() != 'FALSE'

    def load_save(self, save_path):
        """Parse the save file at *save_path* into instance attributes.

        Fix: the original ignored *save_path* and always opened a
        hardcoded developer path.
        """
        with open(save_path) as save_file:
            self.day_limit = self.get_next_int(save_file)
            self.flags = [self.get_next_bool(save_file) for n in range(8)]
            self.player_name = next(save_file)
            self.day = self.get_next_int(save_file)
            self.store_available = self.get_next_int(save_file)
            self.store_all = self.get_next_int(save_file)
            self.cash = self.get_next_int(save_file)
            self.deposit = self.get_next_int(save_file)
            self.debt = self.get_next_int(save_file)
            self.respect = self.get_next_int(save_file)
            self.life = self.get_next_int(save_file)
            self.undefined = next(save_file)
            self.items_name = [next(save_file) for n in range(10)]  # Gun, Kalach, Granat, Shurikan
            self.items_quantity = [next(save_file) for n in range(10)]  # Peski do gnata, magazynek, granat, shurikan
            self.undefined2 = next(save_file)
            self.gang_price = self.get_next_int(save_file)
            self.morale = self.get_next_int(save_file)  # index into morale_values
            self.result = self.get_next_int(save_file)
            self.gang_name = next(save_file)
            self.drugs_name = [next(save_file) for n in range(10)]
            self.drugs_available = [next(save_file) for n in range(10)]
            self.drugs_bag_order = [next(save_file) for n in range(10)]
            self.drugs_bag_price = [next(save_file) for n in range(10)]
            self.undefined3 = [next(save_file) for n in range(10)]
            self.drugs_bag_quantity = [next(save_file) for n in range(10)]
            self.undefined4 = [next(save_file) for n in range(10)]
            self.drugs_bag_wtf = [next(save_file) for n in range(10)]
            # Per-day price history for each drug: self.day entries each.
            self.grass = [self.get_next_int(save_file) for n in range(self.day)]
            self.scun = [self.get_next_int(save_file) for n in range(self.day)]
            self.hash = [self.get_next_int(save_file) for n in range(self.day)]
            self.acid = [self.get_next_int(save_file) for n in range(self.day)]
            self.extasy = [self.get_next_int(save_file) for n in range(self.day)]
            self.speed = [self.get_next_int(save_file) for n in range(self.day)]
            self.heroina = [self.get_next_int(save_file) for n in range(self.day)]
            self.cocaine = [self.get_next_int(save_file) for n in range(self.day)]
            self.brown_sugar = [self.get_next_int(save_file) for n in range(self.day)]
            self.grzybki = [self.get_next_int(save_file) for n in range(self.day)]
if __name__ == '__main__':
    save = Save()
    save.load_save("/home/jarek/Projects/Maxdila/save.txt")
    from statistics import median
    # Per-drug price history, paired with the (low, high) band of its
    # normal base price; days outside the band are counted as events.
    series = {
        'grass':   (save.grass,        20,  29),
        'scun':    (save.scun,         30,  44),
        'hash':    (save.hash,         20,  29),
        'acid':    (save.acid,         20,  29),
        'extasy':  (save.extasy,       15,  21),
        'speed':   (save.speed,        28,  44),
        'heroine': (save.heroina,      40,  59),
        'coca':    (save.cocaine,     100, 149),
        'sugar':   (save.brown_sugar,  40,  59),
        'mush':    (save.grzybki,      15,  21),
    }
    for name, (prices, low, high) in series.items():
        print("="*10, name, "="*10)
        print("Min:", min(prices), " Median:", median(prices), " Max:", max(prices), " Sub:", max(prices)-min(prices))
        below = sum(1 for price in prices if price < low)
        above = sum(1 for price in prices if price > high)
        print("Event--: ", below, "|", format((below*100)/save.day, '.2f'), "%")
        print("Event++: ", above, "|", format((above*100)/save.day, '.2f'), "%")
| [
"(none)"
] | (none) |
8790966e3e341d425086159d4669d433313da05d | 1facb18aaf0b7e3073847acd68aae8b0364dc8a9 | /python/super.py | ce41053b7d1b4d37896502112ef8c5f060358673 | [] | no_license | lopesivan/sandbox | e5abcd0569efccb8bd8442635ef8626a3d3c1f09 | 19f5d18063b1cfdbb413546c4d26fa16d4ead184 | refs/heads/master | 2020-11-26T23:48:47.387595 | 2019-10-06T19:05:10 | 2019-10-06T19:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | #!/usr/bin/env python
#
# Kudos: http://stackoverflow.com/questions/576169/python-super
#
class A(object):
def routine(self):
print "A.routine()"
class B(A):
def routine(self):
print "B.routine()"
# Note that the following error:
# "super() argument 1 must be type, not classobj"
# Means the super class is a classic class (i.e. not rooted at
# object) and super won't work. You have to use the class name
# explicitly.
# Kudos: http://www.velocityreviews.com/forums/t338909-help-with-super.html
super(B, self).routine()
if __name__ == '__main__':
a = A()
b = B()
print "Calling a.routine:"
a.routine()
print "Calling b.routine:"
b.routine()
| [
"von@vwelch.com"
] | von@vwelch.com |
351a8a11acd0862ea213f00255c2f32d4b0959c5 | 4b167378821bc5b1ff9c154cf331f4dcf7f9f425 | /models/univariate_LSTM_cpu_sgd.py | 3cc6396d86726faea90f87a3060ed7836ac636e3 | [] | no_license | thangbk2209/GGClusterTraceDataAnalysis | eb83e719363d1b3cd264f3c0f63a21defa3970f4 | 2214c713d733e444d808dac3d6671a1229ce7b85 | refs/heads/master | 2021-09-15T21:32:47.151694 | 2018-06-11T04:20:55 | 2018-06-11T04:20:55 | 112,274,195 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,831 | py | # LSTM for international airline passengers problem with window regression framing
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pandas import read_csv
import math
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
from keras.callbacks import TensorBoard, EarlyStopping
from keras import optimizers
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
sliding_window=[2,3,4,5]
# load the dataset
# dataframe = read_csv('/home/nguyen/LSTM_GoogleTraceData/data/Fuzzy_data_sampling_617685_metric_10min_datetime_origin.csv', usecols=[0], engine='python', skipfooter=3)
dataframe = read_csv('./data/5_Fuzzy_Mem_sampling_617685_metric_10min_datetime_origin.csv', usecols=[0], engine='python', skipfooter=3)
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
for sliding in sliding_window:
batch_size_arr=[8,16,32,64,128]
for batch_size in batch_size_arr:
# split into train and test sets
print "sliding = %s, batch_size = %s"%(sliding, batch_size)
train_size = 2880
# train_size = int(len(dataset)* 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = sliding
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
testY = scaler.inverse_transform([testY])
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
print 'train X,testX'
print trainX
print testX
print trainX[0], trainY[0]
# create and fit the LSTM network
# model 1 layer 4 neural
model1 = Sequential()
model1.add(LSTM(4, activation = 'relu',input_shape=(sliding, 1)))
model1.add(Dense(1, activation = 'relu'))
# model 2 layer 4-2 neural
model2 = Sequential()
model2.add(LSTM(4,return_sequences=True, activation = 'relu',input_shape=(sliding, 1)))
model2.add(LSTM(2, activation = 'relu'))
model2.add(Dense(1, activation = 'relu'))
# model 2 layer 32-4 neural
model3 = Sequential()
model3.add(LSTM(32,return_sequences=True, activation = 'relu',input_shape=(sliding, 1)))
model3.add(LSTM(4, activation = 'relu'))
model3.add(Dense(1, activation = 'relu'))
# model 2 layer 5122-4 neural
# model4 = Sequential()
# model4.add(LSTM(512,return_sequences=True, activation = 'relu',input_shape=(2*sliding, 1)))
# model4.add(LSTM(4, activation = 'relu'))
# model4.add(Dense(1, activation = 'relu'))
# model 2 layer 2-1
model4 = Sequential()
model4.add(LSTM(2, activation = 'relu',input_shape=(sliding, 1)))
model4.add(Dense(1, activation = 'relu'))
# model 3 layer 32-8-2 neural
model5 = Sequential()
model5.add(LSTM(32,return_sequences=True, activation = 'relu',input_shape=(sliding, 1)))
model5.add(LSTM(8, activation = 'relu',return_sequences=True))
model5.add(LSTM(2, activation = 'relu'))
model5.add(Dense(1, activation = 'relu'))
for k in range(6):
if (k==0):
model = model1
elif (k == 1):
model = model2
elif (k == 2):
model = model3
elif (k == 3):
model = model4
elif (k == 4):
model = model5
modelName = "model" + str(k+1)
print modelName
optimizerArr = ['sgd']
for optimize in optimizerArr:
print optimize
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer = sgd )
history = model.fit(trainX, trainY, epochs=10000, batch_size=batch_size, verbose=2,validation_split=0.25,
callbacks=[EarlyStopping(monitor='loss', patience=20, verbose=1)])
# list all data in history
print(history.history.keys())
# summarize history for accuracy
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# plt.show()
plt.savefig('results/univariate/cpuFuzzy/testSGD/%s/history_sliding=%s_batchsize=%s_optimize=%s.png'%(modelName,sliding,batch_size,optimize))
# make predictions
testPredict = model.predict(testX)
# invert predictions
testPredictInverse = scaler.inverse_transform(testPredict)
print 'len(testY), len(testPredict)'
print len(testY[0]), len(testPredict)
# calculate root mean squared error
testScoreRMSE = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
testScoreMAE = mean_absolute_error(testY[0], testPredictInverse[:,0])
print('Test Score: %f RMSE' % (testScoreRMSE))
print('Test Score: %f MAE' % (testScoreMAE))
testDf = pd.DataFrame(np.array(testPredictInverse))
testDf.to_csv('results/univariate/cpuFuzzy/testSGD/%s/testPredictInverse_sliding=%s_batchsize=%s_optimize=%s.csv'%(modelName,sliding,batch_size,optimize), index=False, header=None)
errorScore=[]
errorScore.append(testScoreRMSE)
errorScore.append(testScoreMAE)
errorDf = pd.DataFrame(np.array(errorScore))
errorDf.to_csv('results/univariate/cpuFuzzy/testSGD/%s/error_sliding=%s_batchsize=%s_optimize=%s.csv'%(modelName,sliding,batch_size,optimize), index=False, header=None) | [
"thangbk2209@gmail.com"
] | thangbk2209@gmail.com |
f1db2dabd9c1b0236e6e4707d154c1a0fe07970d | 726ce8dddbb12af1662e002633bfe538ddf77708 | /PyOpenGL-2.0.2.01-py2.5-win32.egg/OpenGL/GL/INTEL/_texture_scissor.py | 58716cdae011748a107b3cdd8ad5a308cb1c6b41 | [] | no_license | bopopescu/BCPy2000-1 | f9264bb020ba734be0bcc8e8173d2746b0f17eeb | 0f877075a846d17e7593222628e9fe49ab863039 | refs/heads/master | 2022-11-26T07:58:03.493727 | 2019-06-02T20:25:58 | 2019-06-02T20:25:58 | 282,195,357 | 0 | 0 | null | 2020-07-24T10:52:24 | 2020-07-24T10:52:24 | null | UTF-8 | Python | false | false | 287 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'_texture_scissor.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"vanessa_kamara@my.uri.edu"
] | vanessa_kamara@my.uri.edu |
01d5bace35a816b11153aba914b0ab855fc0cd46 | 923f707341f7e6a4c86673c52ca796f40638619c | /78. Subsets.py | 428a43bec43d9f2dab952abf114e74f6d2dc7c8d | [] | no_license | Huijuan2015/leetcode_Python_2019 | bb1e54801faa15ee3ef2a7bd7628b6a16033f7c7 | 36c584e8f92a0725bab7a567dfd10b918408627b | refs/heads/master | 2020-04-22T13:31:55.203162 | 2020-03-10T00:00:58 | 2020-03-10T00:00:58 | 170,412,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
self.res = []
self.findPath(nums, 0, [])
return self.res
def findPath(self, nums, idx, path):
if idx == len(nums):
self.res.append(path)
return
# add or not add
self.findPath(nums, idx+1, path+[nums[idx]])
self.findPath(nums, idx+1, path)
class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
self.res = []
self.helper(nums, 0, [])
return self.res
def helper(self, nums, start, path):
self.res.append(path)
for i in range(start, len(nums)):
# path.append(nums[i])
self.helper(nums, i+1, path+[nums[i]])
# path.pop()
| [
"huijuan1991@hotmail.com"
] | huijuan1991@hotmail.com |
00832af127aa68ebc142168f593714b835f926de | 16050f8da45e59d13ba183add0b3f3df5a0087ac | /hyp.py | 24269badfe339079e3e6cca4cfdcb17dff5306a1 | [] | no_license | RenzhiDaDa/Rotated-RetinaNet | eee964013a69ce4eb335e1da0c9cc913e9610423 | 611933154dca54ba283e20e7ea2b7067861c43a5 | refs/heads/master | 2023-06-18T19:49:38.350444 | 2021-07-06T12:22:36 | 2021-07-06T12:22:36 | 360,192,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | # lr
lr0: 0.0001
warmup_lr: 0.00001
warm_epoch: 1
# setting
num_classes: 1
# training
epochs: 100
batch_size: 1
save_interval: 50
test_interval: 1
| [
"chaser.ming@gmail.com"
] | chaser.ming@gmail.com |
a2938fd817b51a36b3d80e38cce6acdf84ad5d04 | 6d768c542399d67e056a8b6cc5e4252227b5fa0c | /drones_project/settings.py | e33cdae30305d1bd8f976db25c13474f3c3a66b4 | [
"MIT"
] | permissive | arminpourbeik/drones-api | cd2e783934e84e041407ab19b61bfbd59b76ea56 | 7acacf15acbe83e6344d46459c080d60e9db7c53 | refs/heads/main | 2023-02-02T15:00:41.674866 | 2020-12-19T19:39:21 | 2020-12-19T19:39:21 | 322,332,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,941 | py | from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to source control — rotate it and
# load it from an environment variable before any non-local deployment.
SECRET_KEY = "^5uxpp99744md@qubv#_j@r9@y!#9j%*=7+k=$_(3i&+ok#v@!"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True; must list served hostnames in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # 3rd party
    "crispy_forms",
    "rest_framework",
    "rest_framework.authtoken",
    "django_filters",
    # Local
    "drones.apps.DronesConfig",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "drones_project.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "drones_project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): credentials are hardcoded — presumably a local dev database;
# move them to environment variables for any shared environment.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "drones",
        "USER": "postgres",
        "PASSWORD": "admin",
        "HOST": "127.0.0.1",
        "PORT": "5432",
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
# REST Framework settings
# Global DRF config: bounded limit/offset pagination, filter/order/search
# backends, basic+session auth, per-scope throttle rates, and namespace
# versioning.
REST_FRAMEWORK = {
    "DEFAULT_PAGINATION_CLASS": "drones.custompagination.LimitOffsetPaginationWithUpperBound",
    "PAGE_SIZE": 4,
    "DEFAULT_FILTER_BACKENDS": (
        "django_filters.rest_framework.DjangoFilterBackend",
        "rest_framework.filters.OrderingFilter",
        "rest_framework.filters.SearchFilter",
    ),
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework.authentication.BasicAuthentication",
        "rest_framework.authentication.SessionAuthentication",
    ),
    "DEFAULT_THROTTLE_CLASSES": (
        "rest_framework.throttling.AnonRateThrottle",
        "rest_framework.throttling.UserRateThrottle",
    ),
    "DEFAULT_THROTTLE_RATES": {
        "anon": "300/hour",
        "user": "100/hour",
        "drones": "200/hour",
        "pilots": "150/hour",
    },
    "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning",
}
| [
"armin.pourbeik@gmail.com"
] | armin.pourbeik@gmail.com |
2361655557e559fd6ca16cb03859745f9352ad54 | f5bba3e7ef74f0c5dda05d093a5e2735a2b91eb9 | /tvrenamr/logs.py | c1d3db42d19c1d1c9dcf3ce92bfd530c5f3d7393 | [
"MIT"
] | permissive | svisser/tvrenamr | 701f589a600631c45ec6122fec9f89cf8395c206 | 139ded052d20ab182bc187fe8f20f294bc6da316 | refs/heads/master | 2021-01-20T16:34:28.158699 | 2014-12-14T16:47:06 | 2014-12-14T16:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | import logging
import logging.handlers
import os
def convert_log_level(level=26):
    """
    Get a numeric log level from a string. The default 26 is for SHORT logs.

    :param level: a numeric level, a level name (case-insensitive), or a
        falsy value (None/0/'') meaning "use the default".
    :return: the numeric level; unknown names fall back to the default 26.
    """
    # annoying but the level can be passed in as None
    if not level:
        level = 26
    levels = {'notset': 0, 'debug': 10, 'info': 20, 'minimal': 22,
              'short': 26, 'warning': 30, 'error': 40, 'critical': 50}
    if isinstance(level, str):
        # fix: unknown names used to return None, which breaks setLevel();
        # also accept any capitalisation of the level name.
        level = levels.get(level.lower(), 26)
    return level
def get_log_file(filename=None):
    """Return the log file path, creating its directory if needed.

    :param filename: optional log file path; defaults to
        ~/.tvrenamr/tvrenamr.log.  A literal '~' anywhere in the path is
        expanded to the user's home directory.
    :return: the (possibly expanded) log file path.
    """
    # make sure the log directory exists and place the log file there
    if filename is None:  # fix: was `== None`
        filename = os.path.join(
            os.path.expanduser('~'),
            '.tvrenamr',
            'tvrenamr.log'
        )
    filename = filename.replace('~', os.path.expanduser('~'))
    # Best effort: ignore "already exists" (and other OS errors) so that
    # logging setup can still proceed.
    try:
        os.makedirs(os.path.split(filename)[0])
    except OSError:
        pass
    return filename
def start_logging(filename, log_level, quiet=False):
    """
    Set up rotating file logging on the root logger and, unless *quiet*,
    attach a console handler at *log_level*.

    :param filename: log file path (None selects the default location).
    :param log_level: numeric level or level name, as understood by
        convert_log_level.
    :param quiet: when True, skip the console handler entirely.
    """
    filename = get_log_file(filename)
    log_level = convert_log_level(log_level)

    # add the custom levels used by the console output
    logging.addLevelName(22, 'MINIMAL')
    logging.addLevelName(26, 'SHORT')

    # setup log file: rotating at ~1MB, keeping 10 backups, always at DEBUG
    file_format = '%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s'
    handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1048576, backupCount=10)
    handler.setFormatter(logging.Formatter(file_format, '%Y-%m-%dT%H:%M'))
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(logging.DEBUG)

    if not quiet:
        # Use the verbose console format only when running at DEBUG.
        # fix: was `log_level is 10` — identity comparison on an int literal.
        if log_level == logging.DEBUG:
            console_format = '%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s'
            console_datefmt = '%Y-%m-%d %H:%M'
        else:
            console_format = '%(message)s'
            console_datefmt = ''
        console_formatter = logging.Formatter(console_format, console_datefmt)

        # define a Handler with the given level and outputs to the console
        console = logging.StreamHandler()
        console.setLevel(log_level)
        # set the console format & attach the handler to the root logger with it.
        console.setFormatter(console_formatter)
        logging.getLogger().addHandler(console)
| [
"george@ghickman.co.uk"
] | george@ghickman.co.uk |
d8cc1890c94548e4209a03d375edac5ed5970225 | 2cd2e01a948bf360d29d4de1cbfa5c4a076c5bcc | /src/cltkv1/tokenize/old_norse/params.py | 4d76881c269875808039c3a99c34022add0e537b | [
"MIT"
] | permissive | todd-cook/cltkv1 | c4ccf1c2a12fb6c476bcac4373229422bd8a02cc | 90c3daaafda242a1982b38c2b11c52aedab7ddf8 | refs/heads/master | 2022-11-13T02:50:16.523173 | 2020-06-28T02:02:47 | 2020-06-28T02:02:47 | 276,302,479 | 0 | 0 | MIT | 2020-07-01T07:02:59 | 2020-07-01T07:02:59 | null | UTF-8 | Python | false | false | 366 | py | """ Params: Old Norse
"""
__author__ = [
    "Clément Besnier <clemsciences@aol.com>",
    "Patrick J. Burns <patrick@diyclassics.org>",
]
__license__ = "MIT License."
# As far as I know, hyphens were never used for compounds, so the tokenizer treats all hyphens as line-breaks
# Substitution pairs applied in order: the first splits a straight
# apostrophe off the following text; the second inserts a space before any
# of . ! ? ) ( " ; : , « » - so the punctuation tokenizes separately from
# the preceding word.
OldNorseTokenizerPatterns = [(r"\'", r"' "), (r"(?<=.)(?=[.!?)(\";:,«»\-])", " ")]
| [
"kyle@kyle-p-johnson.com"
] | kyle@kyle-p-johnson.com |
8b06719435aa2032f9ea5c74cda2c8069884c0cc | 2ad52a65c45051f26fe26631a31f80279522ddb7 | /build/turtlebot3_simulations/turtlebot3_gazebo/catkin_generated/pkg.installspace.context.pc.py | 89848cabfeb332104ab3d85d721059b3fe7d7665 | [] | no_license | aryamansriram/Movel_Nav | a64c32528b7ce0a5a19127ba3a9379dca0201356 | 0e5e64232a01771999d34694f3bf6840f0c1e3ee | refs/heads/master | 2023-01-03T20:35:22.041816 | 2020-10-21T13:37:11 | 2020-10-21T13:37:11 | 305,279,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;geometry_msgs;nav_msgs;tf;gazebo_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_gazebo"
PROJECT_SPACE_DIR = "/home/rosguy/catkin_ws/install"
PROJECT_VERSION = "1.3.0"
| [
"aryaman.sriram@gmail.com"
] | aryaman.sriram@gmail.com |
a56b0f43ebb055a955f4b91d655e5a6258a88316 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/call_with_args-62.py | 4f9f4f43d1a4dd278da2ce2bcebc76151bda09ff | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | def f(x:int) -> int:
print("start f")
print(x)
g(1, x)
print("end f")
return $ID
def g(y:int, z:int) -> object:
print("start g")
print(y)
print(z)
h("h")
print("end g")
def h(msg: str) -> object:
print(msg)
print(f(4))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
56f4db815464164dca473402336da39fcdba854d | 9f387c703dbf4d970d0259424c7b299108c369f5 | /dd_sdk_1_0/dd_sdk_1_0/models/filesys_modify_ops.py | 9177d423a4bcefbabc66c3e02c4630ffe8dc1fa7 | [] | no_license | gcezaralmeida/datadomain_sdk_python | c989e6846bae9435c523ab09e230fc12d020f7f1 | e102ec85cea5d888c8329626892347571832e079 | refs/heads/main | 2023-08-23T22:42:47.083754 | 2021-10-25T21:52:49 | 2021-10-25T21:52:49 | 370,805,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | # coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dd_sdk_1_0.configuration import Configuration
class FilesysModifyOps(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    allowed enum values
    """
    # This model is effectively a string enum: the closed set of
    # filesystem-modification operation names accepted by the REST API.
    CLEAN_START = "clean_start"
    CLEAN_STOP = "clean_stop"
    CLEAN_THROTTLE_SET = "clean_throttle_set"
    CLEAN_SCHEDULE_SET = "clean_schedule_set"
    CLEAN_FREQUENCY_SET = "clean_frequency_set"
    CLEAN_THROTTLE_RESET = "clean_throttle_reset"
    CLEAN_SCHEDULE_RESET = "clean_schedule_reset"
    CLEAN_FREQUENCY_RESET = "clean_frequency_reset"
    CREATE = "create"
    DESTROY = "destroy"
    ENABLE = "enable"
    DISABLE = "disable"
    SET_OPTIONS = "set_options"

    """
    Attributes:
      swagger_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # Empty because this model carries no properties of its own — only the
    # enum constants above.
    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self, _configuration=None):  # noqa: E501
        """FilesysModifyOps - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(FilesysModifyOps, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FilesysModifyOps):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, FilesysModifyOps):
            return True

        return self.to_dict() != other.to_dict()
"root@s6006st157.petrobras.biz"
] | root@s6006st157.petrobras.biz |
5fd49f494b2b838a3482980a68ee44e46623577d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02410/s906651945.py | 5994492f3b28fd261d02d1352610aa7ca109b6f7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | h,w= [int(i) for i in input().split()]
# Read the h x w matrix (one row per line), then the length-w column vector
# (one component per line), and print each row's dot product with the vector.
matrix = [list(map(int, input().split())) for _ in range(h)]
vector = [int(input()) for _ in range(w)]
for row in matrix:
    print(sum(row[col] * vector[col] for col in range(w)))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6eed796c4646e9c5c07bbf85d7d007048853ede1 | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/BillAccountsGetRequest.py | ee88bbee2b0c2e64f107900eeeb038f5b475d896 | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class BillAccountsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.aids = None
self.fields = None
def getapiname(self):
return 'taobao.bill.accounts.get'
| [
"poorevil@gmail.com"
] | poorevil@gmail.com |
d10c78f88f98a3be61fa933ce992b949dfeb81db | d772869033c47a666622e9ee518bb306db5451a5 | /unified/modules/main/categories/accounting_invoicing/xero/entities/xero_invoice.py | da50c020a25e0b110bf4d62235f327a613008e45 | [] | no_license | funny2code/unified_api | 920f1e19b2304e331b019f8a531d412b8759e725 | ffa28ba0e5c0bd8ad7dd44a468e3d1e777bba725 | refs/heads/main | 2023-08-31T16:00:17.074427 | 2021-10-04T04:09:45 | 2021-10-04T04:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | from dataclasses import dataclass
from accounting_invoicing.entities.invoice import Invoice
from accounting_invoicing.xero import util
@dataclass
class XeroInvoice(Invoice):
organization_id: str = None
item_code: str = None
quantity: str = None
unit_price: str = None
discount: str = None
account: str = None
tax_rate: str = None
attachment_url: str = None
branding_theme: str = None
reference: str = None
send_to_contact: str = None
line_items_type: str = None
creation_date: str = None
due_date: str = None
def __post_init__(self):
self.creation_date_epoch()
self.due_date_epoch()
def due_date_epoch(self):
if self.due_date is None or "-" in self.due_date:
self.due_date = self.due_date
else:
format = "%Y-%m-%d"
self.due_date = util.epoch_to_format(
format, self.due_date)
def creation_date_epoch(self):
if self.creation_date is None or "-" in self.creation_date:
self.creation_date = self.creation_date
else:
format = "%Y-%m-%d"
self.creation_date = util.epoch_to_format(
format, self.creation_date) | [
"baidawardipendar@gmail.com"
] | baidawardipendar@gmail.com |
48d0a43fd14d16e80e27430a7eca20b2532741bf | e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6 | /keystoneauth1/tests/unit/test_betamax_serializer.py | 05056e14821aababc7a81ec00da546c9464d5540 | [] | no_license | KevinKaiQian/polar-bear | 46a814c746246394f76505846166673a049f12f2 | 61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e | refs/heads/master | 2022-04-29T02:15:35.536039 | 2021-05-19T12:33:07 | 2021-05-19T12:33:07 | 172,068,536 | 2 | 0 | null | 2022-03-29T21:56:51 | 2019-02-22T13:11:58 | Python | UTF-8 | Python | false | false | 1,984 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import testtools
import yaml
from keystoneauth1.fixture import serializer
class TestBetamaxSerializer(testtools.TestCase):
TEST_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data', 'ksa_betamax_test_cassette.yaml')
TEST_JSON = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data', 'ksa_serializer_data.json')
def setUp(self):
super(TestBetamaxSerializer, self).setUp()
self.serializer = serializer.YamlJsonSerializer()
def test_deserialize(self):
data = self.serializer.deserialize(open(self.TEST_FILE, 'r').read())
request = data['http_interactions'][0]['request']
self.assertEqual(
'http://keystoneauth.betamax_test/v2.0/tokens',
request['uri'])
payload = json.loads(request['body']['string'])
self.assertEqual('test_tenant_name', payload['auth']['tenantName'])
def test_serialize(self):
data = json.loads(open(self.TEST_JSON, 'r').read())
serialized = self.serializer.serialize(data)
data = yaml.load(serialized)
request = data['http_interactions'][0]['request']
self.assertEqual(
'http://keystoneauth.betamax_test/v2.0/tokens',
request['uri'])
payload = json.loads(request['body']['string'])
self.assertEqual('test_tenant_name', payload['auth']['tenantName'])
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
1d3160661f1b48fa56a02a364187eda4d6dd9bdd | 4ef688b93866285bcc27e36add76dc8d4a968387 | /moto/mediaconnect/urls.py | 75f24e173d40ce687beabb03d6dfb00bd9247d7c | [
"Apache-2.0"
] | permissive | localstack/moto | cec77352df216cac99d5e0a82d7ada933950a0e6 | b0b2947e98e05d913d7ee2a0379c1bec73f7d0ff | refs/heads/localstack | 2023-09-01T05:18:16.680470 | 2023-07-10T09:00:26 | 2023-08-07T14:10:06 | 118,838,444 | 22 | 42 | Apache-2.0 | 2023-09-07T02:07:17 | 2018-01-25T00:10:03 | Python | UTF-8 | Python | false | false | 1,121 | py | from .responses import MediaConnectResponse
url_bases = [
r"https?://mediaconnect\.(.+)\.amazonaws.com",
]
response = MediaConnectResponse()
url_paths = {
"{0}/v1/flows$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/vpcInterfaces$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/vpcInterfaces/(?P<vpcinterfacename>[^/.]+)$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/source$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/source/(?P<sourcearn>[^/.]+)$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/outputs$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/outputs/(?P<outputarn>[^/.]+)$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/entitlements$": response.dispatch,
"{0}/v1/flows/(?P<flowarn>[^/.]+)/entitlements/(?P<entitlementarn>[^/.]+)$": response.dispatch,
"{0}/v1/flows/start/(?P<flowarn>[^/.]+)$": response.dispatch,
"{0}/v1/flows/stop/(?P<flowarn>[^/.]+)$": response.dispatch,
"{0}/tags/(?P<resourcearn>[^/.]+)$": response.dispatch,
}
| [
"noreply@github.com"
] | localstack.noreply@github.com |
6dc95ec44638ba3fae62440e52bc35d76eebc7fa | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/sort/8e70a99e-d9e0-43dd-92cb-145c8269f8c0__SelectionSort.py | 498707c9f714b885f65517fe506d5d31dc91aa87 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | ##############################################################################################################################
# Project: Selection Sort
# Time Complexity: O(n2)
# Space Complexity: O(1)
# Stability: Not stable
# Info: Decrease-by-one sorting
##############################################################################################################################
# Selection Sort
def SelectionSort(L):
unsorted_list = L
sorted_list = []
while unsorted_list != []:
least = unsorted_list[0]
for e in unsorted_list:
if e < least:
least = e
unsorted_list.remove(least)
sorted_list.append(least)
return sorted_list
##############################################################################################################################
# In-place Selection Sort
def Inplace_SelectionSort(L):
for i in range(len(L)):
l = i
for j in range(i+1, len(L)):
if L[j] < L[l]:
l = j
#swap L[i] and L[l]
L[l], L[i] = L[i], L[l]
##############################################################################################################################
def main():
L1 = [1, 4, 3, 5, 6, 2]
print "SelectionSort: ", SelectionSort(L1)
L2 = [1, 4, 3, 5, 6, 2]
Inplace_SelectionSort(L2)
print "Inplace SelectionSort: ", L2
if __name__ == '__main__':
main() | [
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
9b9e47ae2a88e97c4c88d6b7f2f048642b6f2f52 | fc6809a89979b23b53103224d30fcd66739e7d45 | /studyproject/secondapp/urls.py | 6d4410265ad22938e0dd83fc593746a3a194559c | [] | no_license | SUJEONG999/Django_pratice | b97371d3f80f55bf419c7cf906a8c071c31f6de0 | e2f1cdcbb1d1cc3c9e20e105621bc9c31d877934 | refs/heads/master | 2023-02-27T22:48:26.027877 | 2021-02-03T14:14:02 | 2021-02-03T14:14:02 | 333,454,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.exam1, name='exam1'),
path('exam2/', views.exam2, name='exam2'),
path('exam2_1/', views.exam2_1, name='exam2_1'),
path('exam3/', views.exam3, name='exam3'),
path('exam4/', views.exam4, name='exam4'),
path('exam5/', views.exam5, name='exam5'),
path('exam6/', views.exam6, name='exam6'),
path('exam7/', views.exam7, name='exam7'),
path('exam8/', views.exam8, name='exam8'),
path('exam9/', views.exam9, name='exam9'),
path('exam10/<name>/', views.exam10, name='exam10'),
path('exam11/<name>/<int:age>', views.exam11, name='exam11'),
path('exam12/<int:num1>/<int:num2>', views.exam12, name='exam12'),
path('exam13/', views.exam13, name='exam13'),
path('exam14/<word>/<int:num1>/<num2>', views.exam14, name='exam14'),
path('exam15/', views.exam15, name='exam15'),
path('exam16/', views.exam16, name='unico'),
path('exam17/', views.exam17, name='exam17'),
path('exam18/', views.exam18, name='exam18'),
path('exam19/', views.exam19, name='exam19'),
path('exam20/', views.exam20, name='exam20'),
path('exam21/', views.exam21, name='exam21'),
path('exam22/', views.exam22, name='exam22'),
path('exam23/', views.exam23, name='exam23'),
path('exam24/', views.exam24, name='exam24'),
path('json1/', views.json1, name='json1'),
path('json2/', views.json2, name='json2'),
path('json3/', views.json3, name='json3'),
path('product1/', views.product1, name='product1'),
path('basket1/', views.basket1, name='basket1'),
path('product2/', views.product2, name='product2'),
path('basket2/', views.basket2, name='basket2'),
path('ggmap1/', views.ggmap1),
path('ggmap2/', views.ggmap2),
path('ggmap3/', views.ggmap3),
path('ggmap4/', views.ggmap4),
path('ggmap5/', views.ggmap5),
path('ggmap6/', views.ggmap6),
path('ggmap7/', views.ggmap7),
path('kkmap1/', views.kkmap1),
path('kkmap2/', views.kkmap2),
path('kkmap3/', views.kkmap3),
path('kkmap4/', views.kkmap4),
path('kkmap5/', views.kkmap5),
path('kkmap6/', views.kkmap6),
path('kkmap7/', views.kkmap7),
] | [
"ejd090393@gmail.com"
] | ejd090393@gmail.com |
332dd81bd654d419aa0ab72a80a856550d58edda | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03046/s648899043.py | c7f33cd66e2bf7359ad68555df1dc4decc183dab | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | m, k = map(int, input().split())
if m==1 and k==0:
print(0,0,1,1)
exit()
if m==k==1:
print(-1)
exit()
if 2**m<=k:
print(-1)
exit()
ans = [k]
for i in range(2**m):
if i != k:
ans.append(i)
ans.append(k)
for i in reversed(range(2**m)):
if i != k:
ans.append(i)
print(*ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9bb9e2a6e620080d496936566fb0b58eb6e60b67 | 6ab6c60d4ff44b9b0810d945f382b79319788a8f | /setup.py | 879d52a4882ba4dc734617c776ff5f9d55669708 | [] | no_license | godblessyouyou/sptt | d83d565b97ca08e618243a1f9f2a82f4dece1f60 | 0fd50b1b342d35bf22bef15e23fed0eeb5c459bb | refs/heads/master | 2021-01-17T08:44:36.675135 | 2017-03-05T07:16:29 | 2017-03-05T07:16:29 | 83,952,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='sptt',
version='0.1.0',
description="Storage Performance Testing Tools(sptt), test storage preformance.",
long_description=readme + '\n\n' + history,
author="StorageTeam",
author_email='storage@inspur.com',
url='https://github.com/godblessyouyou/sptt',
packages=[
'sptt',
],
package_dir={'sptt':
'sptt'},
entry_points={
'console_scripts': [
'sptt=sptt.cli:main'
]
},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
keywords='sptt',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
60c397b7fa0caca4ae1c68271ecfcfc67a912e24 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_weirdest.py | fae73d3f30bb27efbeb6b19d1c97a8c6d03f5f4d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.adjectives._weird import _WEIRD
#calss header
class _WEIRDEST(_WEIRD, ):
def __init__(self,):
_WEIRD.__init__(self)
self.name = "WEIRDEST"
self.specie = 'adjectives'
self.basic = "weird"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
32c4dbc189e3ef9c9ba4b450d03936f43eb777a4 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/patch_route_filter_rule_py3.py | 9cec9cfdb33db858b510b39b774245835560b7b4 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 3,100 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class PatchRouteFilterRule(SubResource):
"""Route Filter Rule Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param access: Required. The access type of the rule. Valid values are:
'Allow', 'Deny'. Possible values include: 'Allow', 'Deny'
:type access: str or ~azure.mgmt.network.v2017_08_01.models.Access
:ivar route_filter_rule_type: Required. The rule type of the rule. Valid
value is: 'Community'. Default value: "Community" .
:vartype route_filter_rule_type: str
:param communities: Required. The collection for bgp community values to
filter on. e.g. ['12076:5010','12076:5020']
:type communities: list[str]
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
:param tags: Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'access': {'required': True},
'route_filter_rule_type': {'required': True, 'constant': True},
'communities': {'required': True},
'provisioning_state': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'access': {'key': 'properties.access', 'type': 'str'},
'route_filter_rule_type': {'key': 'properties.routeFilterRuleType', 'type': 'str'},
'communities': {'key': 'properties.communities', 'type': '[str]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
route_filter_rule_type = "Community"
def __init__(self, *, access, communities, id: str=None, tags=None, **kwargs) -> None:
super(PatchRouteFilterRule, self).__init__(id=id, **kwargs)
self.access = access
self.communities = communities
self.provisioning_state = None
self.name = None
self.etag = None
self.tags = tags
| [
"noreply@github.com"
] | lmazuel.noreply@github.com |
64374c50564396066362b396a35a11fab804fd1b | 95540a155c043dd84ea6c0fb7d59ba06dc78b875 | /python/9/20 教学管理系统/项目/myPro/myPro/urls.py | d3410dc210c5c4e37e5f0484f48cb52d404b5ae0 | [] | no_license | Lilenn/must | 41b95d8e80f48a6b82febb222936bbc3502cc01f | a510a8d0e58fde1bc97ab7ad9bd2738158dcba5e | refs/heads/master | 2020-04-09T23:09:20.116439 | 2018-12-06T09:02:09 | 2018-12-06T09:02:09 | 160,648,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | """myPro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from myApp import views
from myApp.views import RegisterView,LoginView
urlpatterns = [
path('admin/', admin.site.urls),
path('subject/',views.subject),
path(r'register/', RegisterView.as_view(), name='register'),
path(r'login/', LoginView.as_view(), name='login'),
path('subject/',include('myApp.urls'))
]
| [
"846077763@qq.com"
] | 846077763@qq.com |
0da22359cc22d59fa30c54084f1a2e1c078ffe13 | 4c2ffec49d18913ff5bcd006d6cde7b0371e9f6a | /src/middlewared/middlewared/pytest/unit/plugins/test_pool.py | 412df5d85f0f5ba252662e3334ff2893c8f539c9 | [] | no_license | cyberpower678/freenas | 7b6bbcd7288ef76af66c9f0482652e16b6c4a540 | 3bd269199b404b8d3efe9e867d2c4f37ab187c4b | refs/heads/master | 2022-11-26T10:49:04.041283 | 2020-08-06T18:31:00 | 2020-08-06T18:31:00 | 285,836,451 | 2 | 0 | null | 2020-08-07T13:31:58 | 2020-08-07T13:31:57 | null | UTF-8 | Python | false | false | 903 | py | import textwrap
import pytest
from middlewared.plugins.pool import parse_lsof
@pytest.mark.parametrize("lsof,dirs,result", [
(
textwrap.dedent("""\
p535
cpython3.7
f5
n/usr/lib/data
p536
cpython3.7
f5
n/dev/zvol/backup/vol1
p537
cpython3.7
f5
n/dev/zvol/tank/vols/vol1
p2520
csmbd
f9
n/mnt/tank/blob1
f31
n/mnt/backup/blob2
p97778
cminio
f7
n/mnt/tank/data/blob3
"""),
["/mnt/tank", "/dev/zvol/tank"],
[
(537, "python3.7"),
(2520, "smbd"),
(97778, "minio"),
]
)
])
def test__parse_lsof(lsof, dirs, result):
assert parse_lsof(lsof, dirs) == result
| [
"themylogin@gmail.com"
] | themylogin@gmail.com |
6af25873ec799701ac79216d056ebc143a991453 | a7d5fad9c31dc2678505e2dcd2166ac6b74b9dcc | /dlkit/abstract_osid/osid/license.py | b03a71f2b314c591a316fef14570d1838b5d203c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mitsei/dlkit | 39d5fddbb8cc9a33e279036e11a3e7d4fa558f70 | 445f968a175d61c8d92c0f617a3c17dc1dc7c584 | refs/heads/master | 2022-07-27T02:09:24.664616 | 2018-04-18T19:38:17 | 2018-04-18T19:38:17 | 88,057,460 | 2 | 1 | MIT | 2022-07-06T19:24:50 | 2017-04-12T13:53:10 | Python | UTF-8 | Python | false | false | 1,836 | py | # -*- coding: utf-8 -*-
"""Core Service Interface Definitions
osid version 3.0.0
Copyright (c) 2002-2004, 2006-2008 Massachusetts Institute of
Technology.
Copyright (c) 2009-2012 Ingenescus. All Rights Reserved.
This Work is being provided by the copyright holder(s) subject to the
following license. By obtaining, using and/or copying this Work, you
agree that you have read, understand, and will comply with the following
terms and conditions.
Permission to use, copy and distribute unmodified versions of this Work,
for any purpose, without fee or royalty is hereby granted, provided that
you include the above copyright notices and the terms of this license on
ALL copies of the Work or portions thereof.
You may modify or create Derivatives of this Work only for your internal
purposes. You shall not distribute or transfer any such Derivative of
this Work to any location or to any third party. For the purposes of
this license, "Derivative" shall mean any derivative of the Work as
defined in the United States Copyright Act of 1976, such as a
translation or modification.
This Work and the information contained herein is provided on an "AS IS"
basis WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS IN THE WORK.
The export of software employing encryption technology may require a
specific license from the United States Government. It is the
responsibility of any person or organization contemplating export to
obtain such a license before exporting this Work.
"""
| [
"cjshaw@mit.edu"
] | cjshaw@mit.edu |
b20b524f61b9022b32cd9d31ae8a0bf39be4db79 | ff1a0c651542396ca6abeff8531e4dd4cf4d0def | /utils/wx.py | 305d38a7c3070f9a6d960e39ffd00d8d49850320 | [] | no_license | zptime/interact | 4129fe8a42352ee7f402d57a74603e3d8dcceb49 | fd6c1ba12877c274c07110a95f5f9971fe1a878e | refs/heads/master | 2023-01-04T17:00:54.084476 | 2019-12-08T13:46:38 | 2019-12-08T13:46:38 | 226,673,825 | 0 | 0 | null | 2022-12-27T14:59:08 | 2019-12-08T13:42:25 | JavaScript | UTF-8 | Python | false | false | 1,901 | py | # coding=utf-8
from urlparse import urljoin
import os
import requests
import logging
import json
from applications.user_center.agents import get_uc_internal_domain_list
from applications.user_center.utils import RoundTripDecoder
from utils.errcode import WX_FETCH_VOICE_FAIL, WX_GET_ACCESS_TOKEN_FAIL
from utils.tools import BusinessException
logger = logging.getLogger(__name__)
def get_access_token(school_id):
from applications.common.services import domain_uc
#从用户中心获取微信access_token,用户中心保证token不过期
uc_url = os.path.join(get_uc_internal_domain_list()[0], 'get/access_token')
logger.info('visit user_center %s to get access_token' % uc_url)
response = requests.get(
uc_url,
stream=True,
params={'sid': str(school_id)})
response_dict = json.loads(response.text, cls=RoundTripDecoder)
if response.status_code != 200 or 'c' not in response_dict or response_dict['c'] != 0:
raise BusinessException(WX_GET_ACCESS_TOKEN_FAIL)
return response_dict['d']
def fetch(user, media_id, token=None):
# 从微信服务器下载
access_token = token or get_access_token(user.school.id)
payload = {
'access_token': access_token,
'media_id': str(media_id),
}
response = requests.get(
'http://api.weixin.qq.com/cgi-bin/media/get',
stream=True,
params=payload)
logger.info('fetch media %s from weixin, response_code: %s, response_head: %s' % (media_id, response.status_code, response.headers))
if response.status_code != 200:
raise BusinessException(WX_FETCH_VOICE_FAIL)
elif 'errcode' in response.content:
logger.error('fetch media %s fail, reason: %s' % (media_id, response.content))
raise BusinessException(WX_FETCH_VOICE_FAIL)
else:
return response
| [
"1063764688@qq.com"
] | 1063764688@qq.com |
46567a7b0a2032642ad65290787b0dcf200cb9f0 | a14b3e43705d74da97451de8663e9a98c088aec3 | /dohq_teamcity/models/vcs_status.py | e17ed3cddde028912753ccb18652d2cf75268362 | [
"MIT"
] | permissive | devopshq/teamcity | b5a36d6573cdde2f7c72e77a8e605198a7c7124d | 7a73e05c0a159337ed317f8b8d8072e478a65ca6 | refs/heads/develop | 2023-01-16T07:06:07.514297 | 2022-12-30T10:24:07 | 2022-12-30T10:24:07 | 153,228,762 | 29 | 13 | MIT | 2023-09-08T08:49:56 | 2018-10-16T05:42:15 | Python | UTF-8 | Python | false | false | 2,203 | py | # coding: utf-8
from dohq_teamcity.custom.base_model import TeamCityObject
# from dohq_teamcity.models.vcs_check_status import VcsCheckStatus # noqa: F401,E501
class VcsStatus(TeamCityObject):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'current': 'VcsCheckStatus',
'previous': 'VcsCheckStatus'
}
attribute_map = {
'current': 'current',
'previous': 'previous'
}
def __init__(self, current=None, previous=None, teamcity=None): # noqa: E501
"""VcsStatus - a model defined in Swagger""" # noqa: E501
self._current = None
self._previous = None
self.discriminator = None
if current is not None:
self.current = current
if previous is not None:
self.previous = previous
super(VcsStatus, self).__init__(teamcity=teamcity)
@property
def current(self):
"""Gets the current of this VcsStatus. # noqa: E501
:return: The current of this VcsStatus. # noqa: E501
:rtype: VcsCheckStatus
"""
return self._current
@current.setter
def current(self, current):
"""Sets the current of this VcsStatus.
:param current: The current of this VcsStatus. # noqa: E501
:type: VcsCheckStatus
"""
self._current = current
@property
def previous(self):
"""Gets the previous of this VcsStatus. # noqa: E501
:return: The previous of this VcsStatus. # noqa: E501
:rtype: VcsCheckStatus
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this VcsStatus.
:param previous: The previous of this VcsStatus. # noqa: E501
:type: VcsCheckStatus
"""
self._previous = previous
| [
"allburov@gmail.com"
] | allburov@gmail.com |
97dab6cab60f2b2bd38e03fe6788e9cdd2bf1de3 | ab9d10f43173da753921415af0c07ef2dce3ed33 | /earlier-2020/python_mod_tutorials/mlutiprocess/tsubprocess.py | 9e7a4ccd5715f86cbff964b31cec96522e7d4aff | [
"Apache-2.0"
] | permissive | qianjinfighter/py_tutorials | e34f211735ab9827fc36b3e1cc72eba02fd6f4c2 | fed8e6c8d79f854a1cebcfd5c37297a163846208 | refs/heads/master | 2023-02-20T19:03:00.641107 | 2021-01-20T07:04:30 | 2021-01-20T07:04:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import subprocess
import time
# obj = subprocess.Popen(['python' ,'tsleep.py'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
obj = subprocess.Popen(['python' ,'tsleep.py'])
# out,err = obj.communicate()
# print(out)
# obj.wait(2)
# print(obj.poll())
# time.sleep(2)
while True:
time.sleep(2)
if obj.poll() is None:
#
print("Running , ", obj.poll())
else:
print('Over pid', obj.poll())
obj.terminate()
| [
"transcendentsky@gmail.com"
] | transcendentsky@gmail.com |
6dc65d872d45ea164ab43525bedd6566971b6d58 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02901/s910176589.py | 8d6471bf6baa40512bb4955d46daff0150998477 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | import sys
def main():
input = sys.stdin.readline
N,M=map(int, input().split())
A,B,C=[],[],[]
for _ in range(M):
a,b=map(int, input().split())
A.append(a)
B.append(b-1)
C.append(list(map(lambda x:int(x)-1, input().split())))
#dp[i][j]=i番目の鍵まで見たとき、jの宝箱を開けられるようになった場合の費用の最小値
dp = [[10**9] * (1<<N) for _ in range(M+1)]
dp[0][0] = 0
for i in range(M):
ni = i + 1
nj = 0
for c in C[i]: nj |= 1 << c
for j in range(1<<N):
dp[ni][j] = min(dp[ni][j], dp[i][j])
dp[ni][j|nj] = min(dp[ni][j|nj], dp[i][j] + A[i])
print(dp[M][(1<<N)-1] if dp[M][(1<<N)-1] < 10**9 else -1)
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7ed97924c969e4e32044a392de1488e91ab4290f | 0ee39556b48fe2b8427b97c2d14731cdbcedbd30 | /main.py | bba1dc976ae1585feac7e93ac28b211c35abece1 | [] | no_license | zibb03/BlackBox | 9978df048abb4d45cba2553d3d0d8e426433cbee | 65388af4a24b10757eaedbc685bb5a100b3b5b97 | refs/heads/main | 2023-08-22T04:04:57.650444 | 2021-10-02T05:04:34 | 2021-10-02T05:04:34 | 383,138,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | # main.py
import cv2
import os
import ai
# 영상 처리
def video_processing(video_path, background):
face_mask_recognition_model = cv2.dnn.readNet(
'C:/Users/user/Documents/GitHub/blackbox/models/face_mask_recognition.prototxt',
'C:/Users/user/Documents/GitHub/blackbox/models/face_mask_recognition.caffemodel'
)
mask_detector_model = ai.create_model()
cap = cv2.VideoCapture(video_path)
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
now_frame = 1
if not os.path.exists('outputs'):
os.mkdir('outputs')
out = None
colors = [(0, 255, 0), (0, 0, 255)]
labels = ['with_mask', 'without_mask']
while cap.isOpened():
ret, image = cap.read()
if not ret:
break
height, width = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, scalefactor=1., size=(300, 300), mean=(104., 177., 123.))
face_mask_recognition_model.setInput(blob)
face_locations = face_mask_recognition_model.forward()
result_image = image.copy()
for i in range(face_locations.shape[2]):
confidence = face_locations[0, 0, i, 2]
if confidence < 0.5:
continue
left = int(face_locations[0, 0, i, 3] * width)
top = int(face_locations[0, 0, i, 4] * height)
right = int(face_locations[0, 0, i, 5] * width)
bottom = int(face_locations[0, 0, i, 6] * height)
face_image = image[top:bottom, left:right]
face_image = cv2.resize(face_image, dsize=(224, 224))
face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
predict = ai.predict(mask_detector_model, face_image)
cv2.rectangle(
result_image,
pt1=(left, top),
pt2=(right, bottom),
thickness=2,
color=colors[predict],
lineType=cv2.LINE_AA
)
cv2.putText(
result_image,
text=labels[predict],
org=(left, top - 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.8,
color=colors[predict],
thickness=2,
lineType=cv2.LINE_AA
)
if out is None:
out = cv2.VideoWriter(
'C:/Users/user/Documents/GitHub/blackbox/outputs/output.wmv',
fourcc,
cap.get(cv2.CAP_PROP_FPS),
(image.shape[1], image.shape[0])
)
else:
out.write(result_image)
# (10/400): 11%
print('(' + str(now_frame) + '/' + str(frame_count) + '): ' + str(now_frame * 100 // frame_count) + '%')
now_frame += 1
if not background:
cv2.imshow('result', result_image)
if cv2.waitKey(1) == ord('q'):
break
out.release()
cap.release()
if __name__ == '__main__':
video_processing('C:/Users/user/Documents/GitHub/blackbox/data/04.mp4', False) | [
"64014435+zibb03@users.noreply.github.com"
] | 64014435+zibb03@users.noreply.github.com |
2b27e7a2a4e7460fdf50bf531978e0b5f4ea0c02 | 27c94d7e040902d3cdadd5862b15e67ec2ee4b6e | /xautodl/models/cell_searchs/search_model_darts_nasnet.py | 7cfdb47836e33d106d3ab758ea8564013e6da95a | [
"MIT"
] | permissive | D-X-Y/AutoDL-Projects | 8a0779a7710d809af2b052787928d8d34c14d0d9 | f46486e21b71ae6459a700be720d7648b5429569 | refs/heads/main | 2023-08-13T10:53:49.550889 | 2022-04-24T22:18:16 | 2022-04-24T22:18:16 | 168,538,768 | 989 | 197 | MIT | 2022-04-24T22:16:21 | 2019-01-31T14:30:50 | Python | UTF-8 | Python | false | false | 6,430 | py | ####################
# DARTS, ICLR 2019 #
####################
import torch
import torch.nn as nn
from copy import deepcopy
from typing import List, Text, Dict
from .search_cells import NASNetSearchCell as SearchCell
# The macro structure is based on NASNet
class NASNetworkDARTS(nn.Module):
    """DARTS supernet over a NASNet-style macro structure.

    The network stacks ``3 * N + 2`` search cells: N normal cells per stage,
    with one reduction cell between consecutive stages (channels double at
    each reduction).  Two architecture-parameter tensors — one shared by all
    normal cells, one by all reduction cells — softmax-weight the candidate
    operations on every cell edge.
    """

    def __init__(
        self,
        C: int,
        N: int,
        steps: int,
        multiplier: int,
        stem_multiplier: int,
        num_classes: int,
        search_space: List[Text],
        affine: bool,
        track_running_stats: bool,
    ):
        """Build the supernet.

        :param C: base channel count.
        :param N: number of normal cells per stage.
        :param steps: number of intermediate nodes inside each cell.
        :param multiplier: how many of the last node outputs each cell
            concatenates into its output.
        :param stem_multiplier: channel multiplier for the stem convolution.
        :param num_classes: size of the final classifier output.
        :param search_space: names of the candidate operations.
        :param affine: BatchNorm ``affine`` flag forwarded to the cells.
        :param track_running_stats: BatchNorm flag forwarded to the cells.
        """
        super(NASNetworkDARTS, self).__init__()
        self._C = C
        self._layerN = N
        self._steps = steps
        self._multiplier = multiplier
        # Stem: 3x3 conv lifting the RGB input to C * stem_multiplier channels.
        self.stem = nn.Sequential(
            nn.Conv2d(3, C * stem_multiplier, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(C * stem_multiplier),
        )
        # config for each layer: channel count and whether the cell reduces
        # spatial resolution (True at the two stage boundaries).
        layer_channels = (
            [C] * N + [C * 2] + [C * 2] * (N - 1) + [C * 4] + [C * 4] * (N - 1)
        )
        layer_reductions = (
            [False] * N + [True] + [False] * (N - 1) + [True] + [False] * (N - 1)
        )
        num_edge, edge2index = None, None
        C_prev_prev, C_prev, C_curr, reduction_prev = (
            C * stem_multiplier,
            C * stem_multiplier,
            C,
            False,
        )
        self.cells = nn.ModuleList()
        for index, (C_curr, reduction) in enumerate(
            zip(layer_channels, layer_reductions)
        ):
            cell = SearchCell(
                search_space,
                steps,
                multiplier,
                C_prev_prev,
                C_prev,
                C_curr,
                reduction,
                reduction_prev,
                affine,
                track_running_stats,
            )
            # Every cell must share the same edge layout so that a single
            # pair of architecture-parameter tensors can serve all cells.
            if num_edge is None:
                num_edge, edge2index = cell.num_edges, cell.edge2index
            else:
                assert (
                    num_edge == cell.num_edges and edge2index == cell.edge2index
                ), "invalid {:} vs. {:}.".format(num_edge, cell.num_edges)
            self.cells.append(cell)
            # Each cell consumes the two previous cell outputs; its own
            # output has multiplier * C_curr channels.
            C_prev_prev, C_prev, reduction_prev = C_prev, multiplier * C_curr, reduction
        self.op_names = deepcopy(search_space)
        self._Layer = len(self.cells)
        self.edge2index = edge2index
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        # Architecture parameters: one logit per (edge, candidate op),
        # initialised near zero so the initial op mixture is almost uniform.
        self.arch_normal_parameters = nn.Parameter(
            1e-3 * torch.randn(num_edge, len(search_space))
        )
        self.arch_reduce_parameters = nn.Parameter(
            1e-3 * torch.randn(num_edge, len(search_space))
        )

    def get_weights(self) -> List[torch.nn.Parameter]:
        """Return all network weights (everything except the architecture logits)."""
        xlist = list(self.stem.parameters()) + list(self.cells.parameters())
        xlist += list(self.lastact.parameters()) + list(
            self.global_pooling.parameters()
        )
        xlist += list(self.classifier.parameters())
        return xlist

    def get_alphas(self) -> List[torch.nn.Parameter]:
        """Return the two architecture-parameter tensors (normal, reduce)."""
        return [self.arch_normal_parameters, self.arch_reduce_parameters]

    def show_alphas(self) -> Text:
        """Return a printable dump of the softmaxed architecture parameters."""
        with torch.no_grad():
            A = "arch-normal-parameters :\n{:}".format(
                nn.functional.softmax(self.arch_normal_parameters, dim=-1).cpu()
            )
            B = "arch-reduce-parameters :\n{:}".format(
                nn.functional.softmax(self.arch_reduce_parameters, dim=-1).cpu()
            )
        return "{:}\n{:}".format(A, B)

    def get_message(self) -> Text:
        """Return a human-readable summary of the network and each cell."""
        string = self.extra_repr()
        for i, cell in enumerate(self.cells):
            string += "\n {:02d}/{:02d} :: {:}".format(
                i, len(self.cells), cell.extra_repr()
            )
        return string

    def extra_repr(self) -> Text:
        # Summarise the constructor hyper-parameters for repr()/get_message().
        return "{name}(C={_C}, N={_layerN}, steps={_steps}, multiplier={_multiplier}, L={_Layer})".format(
            name=self.__class__.__name__, **self.__dict__
        )

    def genotype(self) -> Dict[Text, List]:
        """Derive a discrete architecture from the current parameters.

        For every intermediate node, the two incoming edges with the largest
        softmax weight (excluding the 'none' op) are kept.
        """
        def _parse(weights):
            # weights: numpy array of shape (num_edge, num_ops), already softmaxed.
            gene = []
            for i in range(self._steps):
                edges = []
                for j in range(2 + i):
                    node_str = "{:}<-{:}".format(i, j)
                    ws = weights[self.edge2index[node_str]]
                    for k, op_name in enumerate(self.op_names):
                        if op_name == "none":
                            continue
                        edges.append((op_name, j, ws[k]))
                # (TODO) xuanyidong:
                # Here the selected two edges might come from the same input node.
                # And this case could be a problem that two edges will collapse into a single one
                # due to our assumption -- at most one edge from an input node during evaluation.
                edges = sorted(edges, key=lambda x: -x[-1])
                selected_edges = edges[:2]
                gene.append(tuple(selected_edges))
            return gene

        with torch.no_grad():
            gene_normal = _parse(
                torch.softmax(self.arch_normal_parameters, dim=-1).cpu().numpy()
            )
            gene_reduce = _parse(
                torch.softmax(self.arch_reduce_parameters, dim=-1).cpu().numpy()
            )
        # The cell output concatenates the outputs of the last `multiplier` nodes.
        return {
            "normal": gene_normal,
            "normal_concat": list(
                range(2 + self._steps - self._multiplier, self._steps + 2)
            ),
            "reduce": gene_reduce,
            "reduce_concat": list(
                range(2 + self._steps - self._multiplier, self._steps + 2)
            ),
        }

    def forward(self, inputs):
        """Forward pass mixing every candidate op with its softmax weight.

        Returns ``(features, logits)``: the pooled + flattened feature map
        and the classifier output.
        """
        # dim=1 equals dim=-1 here: both parameter tensors are 2-D.
        normal_w = nn.functional.softmax(self.arch_normal_parameters, dim=1)
        reduce_w = nn.functional.softmax(self.arch_reduce_parameters, dim=1)
        # Both cell inputs start as the stem output.
        s0 = s1 = self.stem(inputs)
        for i, cell in enumerate(self.cells):
            if cell.reduction:
                ww = reduce_w
            else:
                ww = normal_w
            s0, s1 = s1, cell.forward_darts(s0, s1, ww)
        out = self.lastact(s1)
        out = self.global_pooling(out)
        out = out.view(out.size(0), -1)
        logits = self.classifier(out)
        return out, logits
| [
"280835372@qq.com"
] | 280835372@qq.com |
51aadf25dc076bdcc46c39c9b24805b4128bd6c5 | 5845ee6d82d9f691e846360fa267b9cca6829d99 | /supervised_learning/0x06-keras/13-predict.py | e6012cab30f9023015c7765a6ef73341f7d4d14f | [] | no_license | jlassi1/holbertonschool-machine_learning | 6e8c11ebaf2fd57e101bd0b20b7d83358cc15374 | d45e18bcbe1898a1585e4b7b61f3a7af9f00e787 | refs/heads/main | 2023-07-02T20:25:52.216926 | 2021-08-11T14:19:49 | 2021-08-11T14:19:49 | 317,224,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #!/usr/bin/env python3
""" 13. Predict """
import tensorflow.keras as K
def predict(network, data, verbose=False):
    """Run the network's forward pass on ``data``.

    network: a trained Keras model (anything exposing ``predict``)
    data: the input samples to classify
    verbose: forwarded to ``network.predict`` to control progress output
    Returns the model's predictions.
    """
    prediction = network.predict(data, verbose=verbose)
    return prediction
| [
"khawlajlassi1990@gmail.com"
] | khawlajlassi1990@gmail.com |
91f5a82f7e5df943a4f4e68d94fa3f8283ed31ac | a4daf0bfff6cfdb9ae9d9fd99fbb2df1a9aea845 | /easy_images/conf/app_settings.py | a7ed81975325be21f99b6232ecf9a01ee3cb8712 | [] | no_license | Gointer/easy-images | ca29dcf4f88b88e5014c8792c882af871f84f799 | 132d43c507ce0e5355757923a28f81889bdb7e82 | refs/heads/master | 2021-01-23T23:25:17.012989 | 2015-07-07T01:04:39 | 2015-07-07T01:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | from django.conf import settings as django_settings
class AppSettings(object):
"""
A holder for app-specific default settings (project settings have
priority).
Settings split with two underscores are looked up from project settings
as a dictionary, for example::
# In myapp.conf:
class Settings(AppSettings):
MYAPP__TASTE = 'sour'
MYAPP__SCENT = 'apple'
# In settings:
MYAPP = {
'TASTE': 'sweet',
}
Individual attributes can be retrieved, or entire underscored
dictionaries::
from myapp.conf import settings
print("Tastes {0}".format(settings.MYAPP__TASTE))
myapp_settings = settings.MYAPP
print("Smells like {0}".format(myapp_settings['SCENT']))
"""
def __getattribute__(self, attr):
# Retrieve (or build) any settings dictionaries (split by two
# undescores).
try:
dicts = super(AppSettings, self).__getattribute__('_app_dicts')
except AttributeError:
dicts = []
for key in dir(self):
if not key.startswith('_') and '__' in key:
potential_dict = key.split('__', 1)[0]
if potential_dict.upper():
dicts.append(potential_dict)
self._app_dicts = dicts
# If we're trying to get a settings dictionary, build and return it.
if attr in dicts:
dict_prefix = attr + '__'
settings_dict = getattr(django_settings, attr, {}).copy()
for full_key in dir(self):
if not full_key.startswith(dict_prefix):
continue
key = full_key[len(dict_prefix):]
if key in settings_dict:
continue
settings_dict[key] = super(AppSettings, self).__getattribute__(
full_key)
return settings_dict
# If it's a dictionary attribute we're looking for, retrieve it.
dict_attr = (
not attr.startswith('_')
and '__' in attr
and attr.split('__', 1)[0]
)
if dict_attr:
try:
settings_dict = getattr(django_settings, dict_attr)
dict_prefix = dict_attr and dict_attr + '__'
return settings_dict[attr[len(dict_prefix):]]
except (AttributeError, KeyError):
return super(AppSettings, self).__getattribute__(attr)
# It must be just a standard attribute.
try:
# If it's not upper case then it's just an attribute of this class.
if attr != attr.upper() and not dict_attr:
raise AttributeError()
return getattr(django_settings, attr)
except AttributeError:
return super(AppSettings, self).__getattribute__(attr)
| [
"smileychris@gmail.com"
] | smileychris@gmail.com |
d2959cac8e253c8eb2217fcac17e2c06e072643c | 08eef4241e62bcff651e3002fc0809fe50aaaee3 | /unsupervised_learning/0x02-hmm/2-absorbing.py | 97591f0a8ce8c85dd1d220f662f8fa1f85f2cede | [] | no_license | Gaspela/holbertonschool-machine_learning | c4e470fed0623a5ef1399125b9f17fd4ae5c577b | b0c18df889d8bd0c24d4bdbbd69be06bc5c0a918 | refs/heads/master | 2023-04-02T00:34:16.350074 | 2021-04-03T02:27:41 | 2021-04-03T02:27:41 | 275,862,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | #!/usr/bin/env python3
"""
Absorbing Chains
"""
import numpy as np
def absorbing(P):
    """
    Determines whether a Markov chain is absorbing.

    P is a square 2D numpy.ndarray of shape (n, n) representing the
    standard transition matrix
    P[i, j] is the probability of transitioning from state i to state j
    n is the number of states in the markov chain
    Returns: True if the chain is absorbing, False if it is not,
    or None on failure (invalid input)
    """
    # Validate input: square 2-D ndarray with non-negative entries.
    # (The original also tested ``np.min(P ** 2) < 0`` which can never be
    # true for real entries; that dead guard has been dropped.)
    if type(P) is not np.ndarray or len(P.shape) != 2:
        return None
    if P.shape[0] != P.shape[1]:
        return None
    if np.any(P < 0):
        return None
    P = P.copy()
    # absorb[i] becomes True once state i is known to reach an absorbing
    # state.  BUGFIX: the original used ``np.ndarray(n)``, an *uninitialised*
    # array, making the first convergence comparison nondeterministic.
    absorb = np.zeros(P.shape[0], dtype=bool)
    while True:
        prev = absorb.copy()
        # A state is absorbing iff it transitions to itself with probability
        # 1, i.e. its diagonal entry is 1.  BUGFIX: the original scanned the
        # whole column (``np.any(P == 1, axis=0)``), wrongly treating a
        # deterministic jump into a *different* state as absorbing.
        absorb = np.diag(P) == 1
        if absorb.all():
            # Every state can reach an absorbing state.
            return True
        if np.all(absorb == prev):
            # No progress this round: the unmarked states can never reach
            # an absorbing state.
            return False
        # Any state with a nonzero transition into the marked set can itself
        # reach absorption; record that by pinning its diagonal entry to 1.
        absorbed = np.any(P[:, absorb], axis=1)
        P[absorbed, absorbed] = 1
| [
"samirmillanorozco@hotmail.com"
] | samirmillanorozco@hotmail.com |
2cc9b79f6a14304a72524de11a1c7b11e8c4b1c1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/1294.py | 9c6bae4305bb8f61fad8ae6294dd15b924149e81 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!/usr/bin/env python3
# Greedy solver for a Code Jam style "oversized pancake flipper" task:
# for each test case, read a string of '+'/'-' symbols and a flipper
# width K, then report the minimum number of K-wide flips needed to make
# everything '+', or IMPOSSIBLE.
T = int(input())
for t in range(1, T+1):
    S, K = input().split()
    # Encode '+' as +1 and '-' as -1 so a flip is just a sign change.
    S = [ 1 if s=='+' else -1 for s in S]
    K = int(K)
    pcnt = 0  # NOTE(review): unused leftover variable
    n = 0
    # Scan left to right: a '-' at position i can only be fixed by the
    # window starting at i, so the greedy flip is forced.
    for i in range(len(S)-K+1):
        if S[i] == -1:
            for j in range(K):
                S[i+j] *= -1
            n += 1
    # The last K-1 positions cannot start a flip any more; if any of them
    # is still '-', no sequence of flips can succeed.
    for s in S[-K+1:]:
        if s == -1:
            n = 'IMPOSSIBLE'
            break
    print("Case #{0}: {1}".format(t, n))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
19b0fff0df677e85eaab65148123d7ff5cf6aea4 | cd9f819b968def4f9b57448bdd926dc5ffa06671 | /B_物件導向Python與TensorFlow應用_鄭仲平_廣稅文化_2019/Ch10/ex10-04.py | 2938bff72f8983f3c8fe553d9fe15a8c7434ce60 | [] | no_license | AaronCHH/jb_pyoop | 06c67f3c17e722cf18147be4ae0fac81726e4cbc | 356baf0963cf216db5db7e11fb67234ff9b31b68 | refs/heads/main | 2023-04-02T05:55:27.477763 | 2021-04-07T01:48:04 | 2021-04-07T01:48:13 | 344,676,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | from abc import ABC, abstractmethod
class ICell(ABC):
    """Interface for a battery cell that can be chained to a next cell."""

    @abstractmethod
    def GetPower(self):
        """Return the power of this cell plus all cells linked after it."""
        pass

    @abstractmethod
    def SetLinkToNext(self, nc):
        """Link cell ``nc`` as the successor of this cell."""
        pass
class ILight(ABC):
    """Interface for a light that aggregates power from added cells."""

    @abstractmethod
    def AddCell(self, cp):
        """Append cell ``cp`` to the light's power chain."""
        pass

    @abstractmethod
    def Power(self):
        """Return the total power available from all added cells."""
        pass
class PanasonicCell(ICell):
    """Battery cell worth 10 units of power, linkable into a chain."""

    def __init__(self):
        # Power contributed by this cell and the link to the next cell.
        self.pw = 10
        self.next_cell = None

    def SetLinkToNext(self, nc):
        """Attach ``nc`` as the cell that follows this one."""
        self.next_cell = nc

    def GetPower(self):
        """Return this cell's power plus that of the rest of the chain."""
        total = self.pw
        if self.next_cell is not None:
            total += self.next_cell.GetPower()
        return total
class CatCell(ICell):
    """Battery cell worth 5 units of power, linkable into a chain."""

    def __init__(self):
        self.pw = 5            # power contributed by this cell
        self.next_cell = None  # successor in the chain, if any

    def SetLinkToNext(self, nc):
        """Attach ``nc`` as the cell that follows this one."""
        self.next_cell = nc

    def GetPower(self):
        """Return the summed power of this cell and every cell after it."""
        rest = 0 if self.next_cell is None else self.next_cell.GetPower()
        return self.pw + rest
class FlashLight():
    """Flashlight holding a singly linked chain of battery cells.

    NOTE(review): this class matches the ILight interface defined above but
    does not subclass it.
    """

    def __init__(self):
        # Empty chain: both ends point at nothing.
        self.head = None
        self.tail = None

    def AddCell(self, cp):
        """Append cell ``cp`` at the end of the chain."""
        if self.head is None:
            self.head = cp
            self.tail = cp
            return
        self.tail.SetLinkToNext(cp)
        self.tail = cp

    def Power(self):
        """Return the chain's total power (delegates to the head cell)."""
        return self.head.GetPower()
if __name__ == '__main__':
    # Demo: one CatCell alone yields 5 units of power.
    light = FlashLight()
    cell = CatCell()
    light.AddCell(cell)
    print(light.Power())
    print("=======================")
    # Add a PanasonicCell (10) and another CatCell (5): total 5 + 10 + 5 = 20.
    cell = PanasonicCell()
    light.AddCell(cell)
    cell = CatCell()
    light.AddCell(cell)
    print(light.Power())
"aaronhsu219@gmail.com"
] | aaronhsu219@gmail.com |
38f2c8c5aaa9b13fc231f75e346503e18e7a76e6 | 46d3b41f98164bd869ac7f987a644ba0d6839e2c | /chat_message/tests.py | 1573d434b31233309d3ebf352b1ad87d054734aa | [] | no_license | Dimas4/Blog | dc22392c82ffb7050418d05b4f149d79c91bca30 | c9d6322bcf0a4b35f80c51830d1f4d5ad7f41bb3 | refs/heads/master | 2021-07-22T00:31:59.491338 | 2018-12-01T12:55:21 | 2018-12-01T12:55:21 | 135,898,131 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | from django.contrib.auth.models import User
from django.test import TestCase
from accounts.models import UserProfile
from .models import Messages
class ChatTest(TestCase):
    """Tests for the Messages chat model."""

    def setUp(self):
        # Seed the test database with one user, its profile and one message.
        author = User.objects.create(
            username="admin", email="admin@mail.ru", password="adminadmin"
        )
        profile = UserProfile.objects.create(user=author)
        Messages.objects.create(
            author=author, author_profile=profile, content="Content"
        )

    def test_get_user_url(self):
        """The message links back to its author's account page."""
        msg = Messages.objects.get(content="Content")
        self.assertEqual(msg.get_user_url(), '/account/1/')

    def test_str(self):
        """String representation of a message is its content."""
        msg = Messages.objects.get(content="Content")
        self.assertEqual(msg.__str__(), msg.content)
| [
"vaniashatuhomail@mail.ru"
] | vaniashatuhomail@mail.ru |
ed2bbad3804a1679f3211bbb4cb1e05fa8f23879 | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /ARC020/ARC020e.py | 3fb6dca801c396ba1e3b51146ac8e12109102dd9 | [
"Unlicense"
] | permissive | yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | #ARC020e
def main():
    """Contest template stub for AtCoder ARC020 E.

    Sets up fast input and a deep recursion limit, but contains no
    solution logic yet.
    """
    import sys
    input=sys.stdin.readline
    sys.setrecursionlimit(10**6)
if __name__ == '__main__':
    main()
"kurvan1112@gmail.com"
] | kurvan1112@gmail.com |
f92e3845080d0a95837073e550ec61628829bb78 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003525.py | 43c14845da9a92c51f7ad39fcf26fdbd97dabae2 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher108457(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher for a commutative ``Mul``
    pattern (emitted by the generator used for SymPy's Rubi rules)."""

    # Singleton instance, created lazily by get().
    _instance = None
    # Pattern table produced by the matchpy code generator: no constant
    # operands, two variables matched inside a Mul (the second has an
    # optional default of 1).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.2.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        """Return the lazily created singleton matcher instance."""
        if CommutativeMatcher108457._instance is None:
            CommutativeMatcher108457._instance = CommutativeMatcher108457()
        return CommutativeMatcher108457._instance

    @staticmethod
    def get_match_iter(subject):
        """Yield matches for ``subject``.

        This generated matcher has no match states: the ``return`` before
        ``yield`` keeps it a generator function that yields nothing.
        """
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 108456
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
649c2a65d3fcc1ab2fb930695cbfb94bac697201 | 72fcc9b617014484a1c021fa90af57b457aba5ba | /08.Hashing/22_sort_elements_frequency.py | 9a13fcc6977f45753107f647bb8fe698081fab6e | [] | no_license | shindesharad71/Data-Structures | 249cb89fc3b54a3d8a67e4e9db832e256d072ee6 | a7cd247228a723e880bccd3aa24c072722785f6d | refs/heads/main | 2023-07-24T21:01:08.070082 | 2021-09-03T04:02:05 | 2021-09-03T04:02:05 | 370,706,713 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # Sort elements by frequency | Set 4 (Efficient approach using hash)
# https://www.geeksforgeeks.org/sort-elements-frequency-set-4-efficient-approach-using-hash/
from collections import defaultdict
def sort_by_freq(arr: list, n: int):
    """Sort ``arr`` in place by decreasing frequency, ties by increasing value.

    arr: list of comparable, hashable elements (mutated and also returned)
    n: number of leading elements whose frequencies are counted
       (callers pass ``len(arr)``)
    Returns the sorted list.
    """
    # defaultdict(int) is the idiomatic zero-default counter
    # (the original used the equivalent but clunkier ``lambda: 0``).
    freq = defaultdict(int)
    for i in range(n):
        freq[arr[i]] += 1
    # Negate the count so higher frequencies sort first while equal-frequency
    # elements keep their natural ascending order.
    arr.sort(key=lambda x: (-freq[x], x))
    return arr
if __name__ == "__main__":
arr = [2, 5, 2, 6, -1, 9999999, 5, 8, 8, 8]
n = len(arr)
print(sort_by_freq(arr, n))
| [
"shindesharad71@gmail.com"
] | shindesharad71@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.