Dataset schema (29 columns; each record below lists these fields in this order, separated by `|`):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 288)
- content_id: string (length 40)
- detected_licenses: list (0 to 112 entries)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (684 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (147 classes)
- src_encoding: string (25 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (128 to 12.7k)
- extension: string (142 classes)
- content: string (length 128 to 8.19k)
- authors: list (1 entry)
- author_id: string (length 1 to 132)
7d48674929e67d0b4ff1c26a9fdc2fdda1b32b00
|
83cf642504313b6ef6527dda52158a6698c24efe
|
/scripts/addons/remote_debugger.py
|
50b2e8e90a16e7b474042e3fa8c4903af0829552
|
[] |
no_license
|
PyrokinesisStudio/Fluid-Designer-Scripts
|
a4c40b871e8d27b0d76a8025c804d5a41d09128f
|
23f6fca7123df545f0c91bf4617f4de7d9c12e6b
|
refs/heads/master
| 2021-06-07T15:11:27.144473
| 2016-11-08T03:02:37
| 2016-11-08T03:02:37
| 113,630,627
| 1
| 0
| null | 2017-12-09T00:55:58
| 2017-12-09T00:55:58
| null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
bl_info = {
'name' : 'Remote Debugger',
'author' : '',
'version' : (0, 2),
'blender' : (2, 75, 0),
'location' : '',
'category' : 'Development'}
import bpy
import os.path
from bpy.types import AddonPreferences
from bpy.props import StringProperty
class DebuggerAddonPreferences(AddonPreferences):
bl_idname = __name__
eggpath = StringProperty(name="Path of PyCharm egg file",
description="make sure to select the py3k egg",
subtype="FILE_PATH",
default="pycharm-debug-py3k.egg"
)
def draw(self, context):
layout = self.layout
layout.prop(self, 'eggpath')
layout.label(text="Make sure to select the egg for Python 3.x: pycharm-debug-py3k.egg")
class DEBUG_OT_connect_debugger(bpy.types.Operator):
bl_idname = "debug.connect_debugger"
bl_label = "Connect to remote Python debugger"
bl_description = "Connects to a PyCharm debugger on localhost:1090"
def execute(self, context):
import sys
user_preferences = context.user_preferences
addon_prefs = user_preferences.addons[__name__].preferences
eggpath = os.path.abspath(addon_prefs.eggpath)
if not os.path.exists(eggpath):
self.report({'ERROR'}, "Unable to find debug egg at %r. Configure the addon properties." % eggpath)
return {'CANCELLED'}
if not any("pycharm-debug" in p for p in sys.path):
sys.path.append(eggpath)
import pydevd
pydevd.settrace('localhost', port=1090, stdoutToServer=True, stderrToServer=True)
return {'FINISHED'}
def register():
bpy.utils.register_class(DEBUG_OT_connect_debugger)
bpy.utils.register_class(DebuggerAddonPreferences)
def unregister():
bpy.utils.unregister_class(DEBUG_OT_connect_debugger)
bpy.utils.unregister_class(DebuggerAddonPreferences)
if __name__ == '__main__':
register()
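# Usage sketch (assumes the addon is enabled in Blender): the operator
# registered above is exposed as bpy.ops.debug.connect_debugger(), derived
# from its bl_idname "debug.connect_debugger".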
|
[
"dev.andrewpeel@gmail.com"
] |
dev.andrewpeel@gmail.com
|
2dab992e4eb273021e8b75698a6f71b4a65cd9bb
|
66973fe6468e1b1f9cd7df765819ba0cb89f6ff8
|
/bin/gitstatus
|
cdb9115b3dd0b17ae88a8f7b6f75db6f5111949c
|
[
"MIT"
] |
permissive
|
dsavransky/miscpy
|
90fee44f245c5eca89897e6536dae702f8836484
|
2aab0e3d089a678ee7fee18be47f2b16591c150a
|
refs/heads/main
| 2022-06-15T21:54:07.285900
| 2022-05-31T23:25:27
| 2022-05-31T23:25:27
| 39,569,507
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
#!/usr/bin/env python
import os
import subprocess
import glob
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check status on all git repos, fetching (or pulling) from remotes."
)
parser.add_argument(
"--pull",
action="store_true",
help="Do a pull (rather than default fetch on remotes)",
)
parser.add_argument(
"--noremote",
action="store_true",
help="Don't contact any remotes.",
)
args = parser.parse_args()
# define all relevant dirs
basedir = os.path.join(os.environ['HOME'], 'Documents')
gitdirs = ['Proposals', 'Talks', 'Teaching', 'TeXStuff', 'MATLAB', 'Notes',
'BragSheets', 'Reviews', 'Letters']
gitdirs = [os.path.join(basedir, d) for d in gitdirs]
gitdirs += glob.glob(os.path.join(basedir, 'gitrepos/*'))
for d in gitdirs:
if "gitrepos" in d:
tmp = subprocess.run(["git", "remote", "-v"], cwd=d,
capture_output=True).stdout.decode()
if "dsavransky" not in tmp:
continue
if not args.noremote:
if args.pull:
res0 = subprocess.run(["git", "pull"], cwd=d, capture_output=True)
else:
res0 = subprocess.run(["git", "fetch"], cwd=d, capture_output=True)
res = subprocess.run(["git", "status"], cwd=d, capture_output=True).stdout.decode()
if ("Your branch is up to date" in res) and ("nothing to commit" in res) and\
("working tree clean" in res):
continue
print("{}\n".format(d))
print(res)
print("\n")
|
[
"dsavransky@gmail.com"
] |
dsavransky@gmail.com
|
|
0b3988687c2fcec1c85d3999fa91dd1ed46daa05
|
d9a912d066b8a10f2611ed80af8e720522da7a51
|
/tf.py
|
54cb79555f0d631b74adee30da18cc24db3adfbc
|
[] |
no_license
|
Sanny26/information_retrieval
|
871bba71d1e8d93329e24fc052d738d03241875f
|
4fb29c9da28fa1937f43c5b36fc470b0420f2ba0
|
refs/heads/master
| 2021-08-26T08:39:04.094595
| 2017-11-22T17:14:51
| 2017-11-22T17:14:51
| 111,576,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
"""Main code for finding TF-IDF scores."""
from collections import Counter
from math import log
from utils import preprocess_file
import os
import numpy as np
import pickle
def get_tf_idf_weights(path):
"""Get the wieghts for TF."""
doc_no = 0
doc_names = dict()
tf_list = dict() # how many term t occurs in doc d
df_list = dict() # how many docs contain term t
sub_dirs = os.listdir(path)
term_list = []
N = 0
for dr in sub_dirs:
dr_path = path + dr + "/"
files = os.listdir(dr_path)
for f in files:
f_path = dr_path+f
doc_names[doc_no] = f_path
doc_no += 1
print(doc_no)
processed_text = preprocess_file(f_path)
tf = Counter(processed_text)
for term, frequency in dict(tf).items():
if term not in tf_list:
tf_list[term] = []
term_list.append(term)
tf_list[term].append((doc_no, 1+log(frequency, 10)))
matrix = np.zeros((len(tf_list), doc_no+1), dtype=float)
for i, term in enumerate(list((tf_list.keys()))):
l = tf_list[term]
for doc_id, freq in l:
matrix[i, doc_id] = freq
return matrix, doc_names, term_list
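# Weighting note (illustrative): for a term occurring tf times in a document,
# the stored weight is 1 + log10(tf), so tf = 10 gives 1 + log10(10) = 2.0
# and tf = 1 gives 1.0.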
def main():
"""Main."""
path = "test/"
weights, doc_names, terms = get_tf_idf_weights(path)
pickle.dump(weights, open("pickles/tf.p", "wb"))
pickle.dump(doc_names, open("pickles/tf-file-names.p", "wb"))
pickle.dump(terms, open("pickles/tf-terms.p", "wb"))
if __name__ == "__main__":
main()
|
[
"chris.g14@iiits.in"
] |
chris.g14@iiits.in
|
bac5086f28d452d811d08211fd9648f3fd55c034
|
41bea39563c74621924d79723f8ba84889958365
|
/nkamg_pcap/server/antimal/misc/trails/feeds/proxyspy.py
|
1ac62a432b900d21ea14ee85e56f7891e002c3b8
|
[
"MIT"
] |
permissive
|
NKQiuKF/pcap_update
|
abee0c13cb583fddb89eb9e86a487279bdc18f1d
|
679e3f116367394a5f58eb4f95b5318e80fee331
|
refs/heads/master
| 2022-10-21T17:49:30.706480
| 2019-09-02T09:22:06
| 2019-09-02T09:22:06
| 205,816,421
| 1
| 0
| null | 2022-10-06T18:33:32
| 2019-09-02T08:55:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 717
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/proxyspy_1d.ipset"
__check__ = "proxyspy_1d"
__info__ = "proxy (suspicious)"
__reference__ = "spys.ru"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
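# Usage sketch (hypothetical; requires the project's core.common module):
# for ip, (info, reference) in fetch().items():
#     print(ip, info, reference)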
|
[
"453341288@qq.com"
] |
453341288@qq.com
|
d00bc0b7e81a3e19920c70b70559eaeea7761f16
|
42e4cc242a2be105dae0288d02a08fbd95bb476a
|
/deepspeed/elasticity/constants.py
|
03cba725fa87d66ef5008c333c05243c149fe043
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
afiaka87/DeepSpeed
|
a49ca48a410190b631a78c392c25359ed4478577
|
83ff549aa365d4fee744074ac0a64f27571ecbc8
|
refs/heads/main
| 2023-04-14T16:22:37.595209
| 2021-04-12T09:20:06
| 2021-04-12T09:20:06
| 356,466,212
| 2
| 0
|
MIT
| 2021-04-12T09:20:07
| 2021-04-10T04:09:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
"""
Copyright 2020 The Microsoft DeepSpeed Team
"""
#########################################
# Elasticity
#########################################
''' Elasticity Utility in DeepSpeed can be used to create highly elastic jobs compatible
with a large number of GPUs. For elastic jobs, DeepSpeed will provide a batch size that
can support a large number of GPUs based on the user specified parameters
'''
FORMAT = '''
Elasticity should be enabled as:
"elasticity": {
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20,
"prefer_larger_batch": true,
"ignore_non_elastic_batch_info": false,
"version": 0.1
}
'''
ELASTICITY = 'elasticity'
# Current elasticity version
LATEST_ELASTICITY_VERSION = 0.1
ENABLED = 'enabled'
ENABLED_DEFAULT = False
# Max acceptable train_batch_size
MAX_ACCEPTABLE_BATCH_SIZE = 'max_train_batch_size'
MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT = 2000
# Acceptable micro batch sizes, same as train_micro_batch_size_per_gpu
MICRO_BATCHES = 'micro_batch_sizes'
MICRO_BATCHES_DEFAULT = [2, 4, 6]
# Min/max of GPUs to search over
MIN_GPUS = 'min_gpus'
MIN_GPUS_DEFAULT = 1
MAX_GPUS = 'max_gpus'
MAX_GPUS_DEFAULT = 10000
# Minimum running time (minutes) before the scheduler will scale us, 0 implies it's unknown
MIN_TIME = "min_time"
MIN_TIME_DEFAULT = 0
# When finding a suitable batch size, attempt to find one that is closest
# to the max train batch size given.
PREFER_LARGER_BATCH = 'prefer_larger_batch'
PREFER_LARGER_BATCH_DEFAULT = True
# In order to reduce confusion, if elastic mode is enabled we
# require (via assert) that no batch info is set outside of the
# elastic config. You can turn off this assert via this config
# but keep in mind that all batch info defined outside the
# elastic mode *will be ignored*.
IGNORE_NON_ELASTIC_BATCH_INFO = 'ignore_non_elastic_batch_info'
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT = False
# Version of elastic logic to use
VERSION = "version"
VERSION_DEFAULT = LATEST_ELASTICITY_VERSION
# Minimum deepspeed version to use elasticity
MINIMUM_DEEPSPEED_VERSION = "0.3.8"
# Environment variable storing elastic config from resource scheduler
DEEPSPEED_ELASTICITY_CONFIG = "DEEPSPEED_ELASTICITY_CONFIG"
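# Consumer sketch (illustrative, not part of the original module): reading
# these keys out of a user config dict with their defaults.
#   elastic = config.get(ELASTICITY, {})
#   enabled = elastic.get(ENABLED, ENABLED_DEFAULT)
#   max_batch = elastic.get(MAX_ACCEPTABLE_BATCH_SIZE, MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT)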
|
[
"noreply@github.com"
] |
afiaka87.noreply@github.com
|
f8114ec28447eee38a12cf5ac1de1c2782d617a8
|
b34463870a1754e5f60029a77621c72f32b29a64
|
/08_cpp-overload-eigen/test.py
|
1d53e9920cbff02aaa4110da2d6b1f686d5c2d89
|
[
"MIT"
] |
permissive
|
strint/pybind11_examples
|
63d59c064a0e9a6d4ddd64413c3955df53a84969
|
f74b3cda2b9d39728923b9b155920e828b7a29f7
|
refs/heads/master
| 2022-12-02T12:56:52.824416
| 2020-08-13T07:41:10
| 2020-08-13T07:41:10
| 285,498,754
| 0
| 0
|
MIT
| 2020-08-06T07:04:18
| 2020-08-06T07:04:17
| null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
import numpy as np
import example
A = np.array([[1,2,1],
[2,1,0],
[-1,1,2]])
B = 10
# np.int / np.float were aliases for the builtins and are removed in NumPy >= 1.24
print(example.mul(A.astype(int), int(B)))
print(example.mul(A.astype(float), float(B)))
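# Note (assuming example.mul is overloaded in C++ for integer and
# floating-point Eigen matrices, per the directory name): pybind11 dispatches
# to the overload matching the dtype of the array passed in, which is what
# the two calls above exercise.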
|
[
"tom@geus.me"
] |
tom@geus.me
|
315603494a810e20704e702766b8df35b57a18c2
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/ACL_PyTorch/contrib/nlp/Rosetta_Resnet34_vd/Rosetta_Resnet34_vd_postprocess.py
|
6e17c6800c641359f12e3e433a4c33587569cab5
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,248
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, 'PaddleOCR')))
import paddle
import numpy as np
import tools.program as program
from tqdm import tqdm
from ppocr.data import build_dataloader
from ppocr.metrics import build_metric
from ppocr.postprocess import build_post_process
def main(config, device, logger, vdl_writer):
valid_dataloader = build_dataloader(config, 'Eval', device, logger)
eval_class = build_metric(config['Metric'])
global_config = config['Global']
post_process_class = build_post_process(config['PostProcess'], global_config)
pbar = tqdm(
total=len(valid_dataloader),
desc='Postprocessing',
position=0,
leave=True)
for idx, batch in enumerate(valid_dataloader):
result_name = 'img_{}_0.bin'.format(idx)
result = os.path.join(config['results'], result_name)
preds = paddle.to_tensor(np.fromfile(result, dtype=np.float32).reshape(1, 25, 37))
batch = [item.numpy() for item in batch]
post_result = post_process_class(preds, batch[1])
eval_class(post_result, batch)
pbar.update(1)
pbar.close()
metric = eval_class.get_metric()
print(metric)
if __name__ == "__main__":
config, device, logger, vdl_writer = program.preprocess()
main(config, device, logger, vdl_writer)
|
[
"noreply@gitee.com"
] |
noreply@gitee.com
|
115cd19c16bf70d8b6fb245210f8ee067431cb67
|
adb6755eb1a3d91375e6b4e9b8c1afd07f85313b
|
/venv/Lib/site-packages/pandas/tests/tseries/offsets/test_offsets_properties.py
|
afb0549a23c3d833a3cad9b8f36e3d974929567b
|
[] |
no_license
|
Atwinenickson/Socialmediaclassifier-
|
af54b559569e80004c441fc90dc44b0ee945555d
|
05c5abbaba8694d9bf95d745ffca75c17ac69621
|
refs/heads/master
| 2022-12-15T01:33:18.073259
| 2019-06-07T15:38:18
| 2019-06-07T15:38:18
| 190,616,071
| 1
| 0
| null | 2022-12-08T05:13:29
| 2019-06-06T16:41:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,941
|
py
|
# -*- coding: utf-8 -*-
"""
Behavioral based tests for offsets and date_range.
This file is adapted from https://github.com/pandas-dev/pandas/pull/18761 -
which was more ambitious but less idiomatic in its use of Hypothesis.
You may wish to consult the previous version for inspiration on further
tests, or when trying to pin down the bugs exposed by the tests below.
"""
import warnings
from hypothesis import assume, given, strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
import pytest
import pandas as pd
from pandas.tseries.offsets import (
BMonthBegin, BMonthEnd, BQuarterBegin, BQuarterEnd, BYearBegin, BYearEnd,
MonthBegin, MonthEnd, QuarterBegin, QuarterEnd, YearBegin, YearEnd)
# ----------------------------------------------------------------
# Helpers for generating random data
with warnings.catch_warnings():
warnings.simplefilter('ignore')
min_dt = pd.Timestamp(1900, 1, 1).to_pydatetime()
max_dt = pd.Timestamp(2100, 1, 1).to_pydatetime()
gen_date_range = st.builds(
pd.date_range,
start=st.datetimes(
# TODO: Choose the min/max values more systematically
min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
max_value=pd.Timestamp(2100, 1, 1).to_pydatetime()
),
periods=st.integers(min_value=2, max_value=100),
freq=st.sampled_from('Y Q M D H T s ms us ns'.split()),
tz=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
gen_random_datetime = st.datetimes(
min_value=min_dt,
max_value=max_dt,
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones())
)
# The strategy for each type is registered in conftest.py, as they don't carry
# enough runtime information (e.g. type hints) to infer how to build them.
gen_yqm_offset = st.one_of(*map(st.from_type, [
MonthBegin, MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin, BQuarterEnd,
YearBegin, YearEnd, BYearBegin, BYearEnd
]))
# ----------------------------------------------------------------
# Offset-specific behaviour tests
# Based on CI runs: Always passes on OSX, fails on Linux, sometimes on Windows
@pytest.mark.xfail(strict=False, reason='inconsistent between OSs, Pythons')
@given(gen_random_datetime, gen_yqm_offset)
def test_on_offset_implementations(dt, offset):
assume(not offset.normalize)
# check that the class-specific implementations of onOffset match
# the general case definition:
# (dt + offset) - offset == dt
compare = (dt + offset) - offset
assert offset.onOffset(dt) == (compare == dt)
@pytest.mark.xfail
@given(gen_yqm_offset, gen_date_range)
def test_apply_index_implementations(offset, rng):
# offset.apply_index(dti)[i] should match dti[i] + offset
assume(offset.n != 0) # TODO: test for that case separately
# rng = pd.date_range(start='1/1/2000', periods=100000, freq='T')
ser = pd.Series(rng)
res = rng + offset
res_v2 = offset.apply_index(rng)
assert (res == res_v2).all()
assert res[0] == rng[0] + offset
assert res[-1] == rng[-1] + offset
res2 = ser + offset
# apply_index is only for indexes, not series, so no res2_v2
assert res2.iloc[0] == ser.iloc[0] + offset
assert res2.iloc[-1] == ser.iloc[-1] + offset
# TODO: Check randomly assorted entries, not just first/last
@pytest.mark.xfail
@given(gen_yqm_offset)
def test_shift_across_dst(offset):
# GH#18319 check that 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
# Note that dti includes a transition across DST boundary
dti = pd.date_range(start='2017-10-30 12:00:00', end='2017-11-06',
freq='D', tz='US/Eastern')
assert (dti.hour == 12).all() # we haven't screwed up yet
res = dti + offset
assert (res.hour == 12).all()
|
[
"atwiinenicxon@gmail.com"
] |
atwiinenicxon@gmail.com
|
32dbcf217e368ae2de5dac068c7d6e37cadb9bce
|
6536946f7997b3eccda846505f1e30edd3af99d5
|
/mycroft_jarbas_utils/hivemind/clients/standalone_voice_client.py
|
b5c077751ba1fd4724ddc518a4974b33e3151e67
|
[] |
no_license
|
JarbasAl/ZZZ_mycroft_jarbas_utils
|
2a6e4d032675fc340c3ccec5eabcf94bacf06460
|
f05f99a19e88aa7d7d778e30058e759fedacbb25
|
refs/heads/master
| 2022-03-24T19:37:56.187643
| 2019-12-13T02:57:42
| 2019-12-13T02:57:42
| 122,380,983
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,114
|
py
|
from twisted.internet import reactor, ssl
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol
from twisted.internet.protocol import ReconnectingClientFactory
from mycroft_jarbas_utils.hivemind.clients.speech.listener import RecognizerLoop
from threading import Thread
conf = {
"listener": {
"sample_rate": 16000,
"channels": 1,
"record_wake_words": False,
"record_utterances": False,
"phoneme_duration": 120,
"multiplier": 1.0,
"energy_ratio": 1.5,
"wake_word": "hey mycroft",
"stand_up_word": "wake up"
},
"hotwords": {
"hey mycroft": {
"module": "pocketsphinx",
"phonemes": "HH EY . M AY K R AO F T",
"threshold": 1e-90,
"lang": "en-us"
},
"thank you": {
"module": "pocketsphinx",
"phonemes": "TH AE NG K . Y UW .",
"threshold": 1e-1,
"listen": False,
"utterance": "thank you",
"active": True,
"sound": "",
"lang": "en-us"
},
"wake up": {
"module": "pocketsphinx",
"phonemes": "W EY K . AH P",
"threshold": 1e-20,
"lang": "en-us"
}
},
"stt": {
"deepspeech_server": {
"uri": "http://localhost:8080/stt"
},
"kaldi": {
"uri": "http://localhost:8080/client/dynamic/recognize"
}
}
}
import json
import sys
import logging
import base64
logger = logging.getLogger("Standalone_Mycroft_Client")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel("INFO")
platform = "JarbasVoiceClientv0.1"
class JarbasVoiceClientProtocol(WebSocketClientProtocol):
def onConnect(self, response):
logger.info("Server connected: {0}".format(response.peer))
def onOpen(self):
logger.info("WebSocket connection open. ")
self.loop = RecognizerLoop(conf)
self.listen = Thread(target=self.start_listening)
self.listen.setDaemon(True)
self.listen.start()
def handle_record_begin(self):
logger.info("Begin Recording...")
def handle_record_end(self):
logger.info("End Recording...")
def handle_awoken(self):
""" Forward mycroft.awoken to the messagebus. """
logger.info("Listener is now Awake: ")
def handle_wakeword(self, event):
logger.info("Wakeword Detected: " + event['utterance'])
def handle_utterance(self, event):
context = {'client_name': platform, "source": self.peer + ":speech",
'destinatary': "https_server"}
msg = {"data": {"utterances": event['utterances'], "lang": "en-us"},
"type": "recognizer_loop:utterance",
"context": context}
self.send(msg)
def handle_unknown(self):
logger.info('mycroft.speech.recognition.unknown')
def handle_hotword(self, event):
config = conf.get("listener", {})
ww = config.get("wake_word", "hey mycroft")
suw = config.get("stand_up_word", "wake up")
if event["hotword"] != ww and event["hotword"] != suw:
logger.info("Hotword Detected: " + event['hotword'])
def handle_sleep(self):
self.loop.sleep()
def handle_wake_up(self, event):
self.loop.awaken()
def handle_mic_mute(self, event):
self.loop.mute()
def handle_mic_unmute(self, event):
self.loop.unmute()
def handle_audio_start(self, event):
"""
Mute recognizer loop
"""
self.loop.mute()
def handle_audio_end(self, event):
"""
Request unmute, if more sources has requested the mic to be muted
it will remain muted.
"""
self.loop.unmute() # restore
def handle_stop(self, event):
"""
Handler for mycroft.stop, i.e. button press
"""
self.loop.force_unmute()
def start_listening(self):
self.loop.on('recognizer_loop:utterance', self.handle_utterance)
self.loop.on('recognizer_loop:record_begin', self.handle_record_begin)
self.loop.on('recognizer_loop:awoken', self.handle_awoken)
self.loop.on('recognizer_loop:wakeword', self.handle_wakeword)
self.loop.on('recognizer_loop:hotword', self.handle_hotword)
self.loop.on('recognizer_loop:record_end', self.handle_record_end)
self.loop.run()
def stop_listening(self):
self.loop.remove_listener('recognizer_loop:utterance', self.handle_utterance)
self.loop.remove_listener('recognizer_loop:record_begin', self.handle_record_begin)
self.loop.remove_listener('recognizer_loop:awoken', self.handle_awoken)
self.loop.remove_listener('recognizer_loop:wakeword', self.handle_wakeword)
self.loop.remove_listener('recognizer_loop:hotword', self.handle_hotword)
self.loop.remove_listener('recognizer_loop:record_end', self.handle_record_end)
self.listen.join(0)
def onMessage(self, payload, isBinary):
if not isBinary:
msg = json.loads(payload)
if msg.get("type", "") == "speak":
utterance = msg["data"]["utterance"]
logger.info("Output: " + utterance)
else:
pass
def send(self, msg):
msg = json.dumps(msg).encode("utf-8")  # autobahn's sendMessage expects bytes
self.sendMessage(msg, False)
def onClose(self, wasClean, code, reason):
logger.info("WebSocket connection closed: {0}".format(reason))
self.stop_listening()
class JarbasVoiceClientFactory(WebSocketClientFactory, ReconnectingClientFactory):
protocol = JarbasVoiceClientProtocol
def __init__(self, *args, **kwargs):
super(JarbasVoiceClientFactory, self).__init__(*args, **kwargs)
self.status = "disconnected"
self.client = None
# websocket handlers
def clientConnectionFailed(self, connector, reason):
logger.info(
"Client connection failed: " + str(reason) + " .. retrying ..")
self.status = "disconnected"
self.retry(connector)
def clientConnectionLost(self, connector, reason):
logger.info(
"Client connection lost: " + str(reason) + " .. retrying ..")
self.status = "disconnected"
self.retry(connector)
def start_voice_client(host="0.0.0.0", port=5678, name="standalone voice client", api="test_key", config=None):
global conf
conf = config or conf
authorization = name + ":" + api
api = base64.b64encode(authorization.encode("utf-8"))  # b64encode needs bytes
headers = {'authorization': api}
address = u"wss://" + host + u":" + str(port)
factory = JarbasVoiceClientFactory(address, headers=headers,
useragent=platform)
factory.protocol = JarbasVoiceClientProtocol
contextFactory = ssl.ClientContextFactory()
reactor.connectSSL(host, port, factory, contextFactory)
reactor.run()
if __name__ == '__main__':
start_voice_client()
|
[
"jarbasai@mailfence.com"
] |
jarbasai@mailfence.com
|
61c7e594c9592fed0407aa4a923ed6db00bd2fae
|
0e477667e6ce65e0799a5605b8f7f9cd778ff81f
|
/P2/dino_game.py
|
2da57b2b18949ee9d6558768c7a7d094a4753f69
|
[] |
no_license
|
rohitaswchoudhary/py_projects
|
2e79eb88e8b2d504e9f2c86e5cdedf6ba1470cb9
|
adde531b1311a1f4e4dd1d5bc57f5f8b743e4f7f
|
refs/heads/main
| 2023-04-16T08:40:05.614405
| 2021-04-29T07:43:21
| 2021-04-29T07:43:21
| 337,432,934
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
import pyautogui
import time
from PIL import Image, ImageGrab
def hit(key):
pyautogui.keyDown(key)
def is_collide(data):
for i in range(300,415):
for j in range(410,560):
if data[i,j]<40:
hit("down")
return True
for i in range(300,415):
for j in range(563,650):
if data[i,j]<40:
hit('up')
return True
return False
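# Detection idea (illustrative): the screen grab is converted to grayscale
# ('L' mode), so data[i, j] < 40 flags a near-black pixel (cactus or bird) in
# one of two scan regions ahead of the dinosaur; a hit in the first region
# presses 'down', a hit in the second presses 'up'.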
if __name__ == "__main__":
time.sleep(10)
hit('up')
while True:
image= ImageGrab.grab().convert('L')
data = image.load()
is_collide(data)
# print(asarray(image))
for i in range (300,415):
for j in range(410,563):
data[i,j] = 171
image.show()
break
|
[
"you@example.com"
] |
you@example.com
|
38a04f01a5160d296ff561122a00bddcde966b66
|
5240574e3e89cfd8393141e08479aac6f314acc8
|
/L2/helloCont.sh
|
bfb2b4bdad7774191024f82f87f35458d3afc6a6
|
[] |
no_license
|
linnil1/2018_LAB_Tutorial
|
1d1fdce3c37e1881715de0c92ea7ad7a66e49e2f
|
2d7d9418f29915c828e4f2561709bd731dd0cab8
|
refs/heads/master
| 2020-03-23T02:08:03.535006
| 2018-07-22T06:21:04
| 2018-07-22T06:21:04
| 140,957,748
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
sh
|
#!/usr/bin/python3
import time
import sys
num = 0
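# num % 3 is truthy except on multiples of 3, so two out of every three
# lines go to stdout and every third line goes to stderr.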
while True:
if num % 3:
print("hello -- " + str(num))
sys.stdout.flush()
else:
print("hello -- " + str(num), file=sys.stderr)
sys.stderr.flush()
num += 1
time.sleep(0.1)
|
[
"linnil1.886@gmail.com"
] |
linnil1.886@gmail.com
|
f4283609b0b15e9b8ea4c1d8aee1778707b75c26
|
34599a66861f7d95a5740eeb5329ea77014e18d4
|
/problems_solving/project-euler/pro043_sum_pandigital.py
|
311f0e0f4f790452348cb363b5436bcaf2be503e
|
[] |
no_license
|
laolee010126/algorithm-with-python
|
f0f5f1bc3cbe374ccbb59e10ac639674c44ae743
|
89ff0c47a6d8b0cd5b31a25bb3981b8e90971f19
|
refs/heads/master
| 2022-04-01T17:38:36.199309
| 2020-01-14T01:54:22
| 2020-01-14T01:54:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
def get_digit(n, start, length):
n = str(n)
return n[start-1:start-1+length]
def sum_pandigital():
LENGTH = 3
used = [False] * 10
starts = [2, 3, 4, 5, 6, 7, 8]
dividers = [2, 3, 5, 7, 11, 13, 17]
pandigitals = []
ans = []
def generate_pandigitals(tmp, used, left):
nonlocal pandigitals
if not left:
pandigitals.append(tmp)
return
for n in range(10):
if not used[n]:
used[n] = True
generate_pandigitals(tmp + str(n), used, left-1)
used[n] = False
generate_pandigitals('', used, 10)
for pan in pandigitals:
if all(int(get_digit(pan, start, LENGTH)) % divider == 0 for start, divider in zip(starts,
dividers)):
ans.append(int(pan))
return ans
if __name__ == '__main__':
ans = sum_pandigital()
print(ans)
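# Worked check (the classic Project Euler 43 example): for 1406357289 the
# three-digit substrings 406, 063, 635, 357, 572, 728 and 289 are divisible
# by 2, 3, 5, 7, 11, 13 and 17 respectively, so it appears in the returned list.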
|
[
"shoark7@gmail.com"
] |
shoark7@gmail.com
|
51f9546b94c3bcb4cd440a1de34e1b7e0459997f
|
4631798b64f2118b7d8e64483a14d7485163358b
|
/8.6.1_making_pizzas.py
|
03606d0835e6cdc7d73d325e5776c21e070d9050
|
[] |
no_license
|
royrowe/python
|
288680aba27b8c2d46368250b45fb1672427fe6a
|
dc7cebd56aa1bee7b2afd91e3a2a4b03f1775ba5
|
refs/heads/master
| 2020-04-15T20:05:02.587794
| 2019-01-10T02:53:08
| 2019-01-10T02:53:08
| 164,978,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
#!/usr/bin/env python
'''
@File :8.6.1_making_pizzas.py
@Copyright :luoming
@Date :
@Desc :
'''
import pizzaim
pizzaim.make_pizza(16, 'pepperoni')
pizzaim.make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')
|
[
"your email"
] |
your email
|
4a15925f7f82e3ae829257f3fb188b69d1c18d48
|
3df98e7d4551220e3b09d122e8d6897ca572d9f3
|
/basic programs/10.py
|
f86b5bacd38650a07e43778f169c04f12db69575
|
[] |
no_license
|
Madhav2108/Python-
|
2e4bf778348786d8dd082e3e1cdd7acb41d9f559
|
fdfdf944a96d83352979bc23c3b65aac7bd41d26
|
refs/heads/master
| 2023-03-30T21:37:26.326980
| 2021-03-31T17:40:49
| 2021-03-31T17:40:49
| 273,668,175
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
U=int(input("Enter the Units"))
if U<100:
print("Charges are Rs.1perday")
elif U<200:
print("Charges are Rs.2perday")
else:
print("Charges are Rs.4perday")
|
[
"noreply@github.com"
] |
Madhav2108.noreply@github.com
|
a94799987815dbae47be87fd753fc0b8e50c3e3e
|
1d5b2b72d322dd154a8efb547290ad5abb1fd098
|
/work_dir/autoencoder_test/prepare_nn.py
|
f10b3551f9bc5475708ae0a016220f1b6df6eaa6
|
[] |
no_license
|
hxzwd/drafts
|
6b593b50cae309c02495a8aff28719f7b636962d
|
478f4a4c399ab0c7c3f8f6e22d13131488716e4d
|
refs/heads/master
| 2020-04-28T01:42:58.998610
| 2019-05-05T17:49:48
| 2019-05-05T17:49:48
| 174,868,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
from keras.layers import Input, Dense, Flatten, Reshape
from keras.models import Model
def create_dense_autoencoder():
encoding_dim = 49
input_img = Input(shape = (28, 28, 1))
flat_img = Flatten()(input_img)
encoded = Dense(encoding_dim, activation = "relu")(flat_img)
input_encoded = Input(shape = (encoding_dim,))
flat_decoded = Dense(28 * 28, activation = "sigmoid")(input_encoded)
decoded = Reshape((28, 28, 1))(flat_decoded)
encoder = Model(input_img, encoded, name = "encoder")
decoder = Model(input_encoded, decoded, name = "decoder")
autoencoder = Model(input_img, decoder(encoder(input_img)), name = "autoencoder")
return encoder, decoder, autoencoder
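# Usage sketch (hypothetical training setup):
# encoder, decoder, autoencoder = create_dense_autoencoder()
# autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
# autoencoder.fit(x_train, x_train, epochs=10, batch_size=256)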
|
[
"="
] |
=
|
5e28913dc9429bda9278535096ad84a6da40a243
|
315006dde839c66dab61757f5073466ef63883b6
|
/imagepy/core/manager/languagemanager.py
|
1b9b348f99ed51e5ab4b8044a585b22061c156df
|
[
"BSD-2-Clause"
] |
permissive
|
clickinfinite/imagepy
|
0777b819e95840a6e41dafc623643e22dfc44adf
|
5e6425a08ce3a0d9c2ab1b6e749b02cb9362e7f4
|
refs/heads/master
| 2021-07-25T20:29:26.767885
| 2017-11-07T16:33:00
| 2017-11-07T16:33:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
import os
from ... import root_dir
from glob import glob
class LanguageManager:
plgs = []
langs = {}
cur = None
filename = os.path.join(root_dir,'data/language/*.dic')
@classmethod
def set(cls, cur):
cls.cur = None if cur=='English' else cls.langs[cur]
curfile = open(os.path.join(root_dir,'data/language/cur.txt'), 'w', encoding='utf-8')
cur = curfile.write(cur)
curfile.close()
@classmethod
def read(cls):
path = os.path.join(root_dir,'data/language/*.dic')
for name in glob(path):
pkl_file = open(name, 'r', encoding='utf-8')
fp, fn = os.path.split(name)
fn, fe = os.path.splitext(fn)
cls.langs[fn] = {}
for line in pkl_file.readlines():
k,v = line.replace('\n', '').split(':')
cls.langs[fn][k] = v
pkl_file.close()
curfile = os.path.join(root_dir,'data/language/cur.txt')
if not os.path.exists(curfile): return
curfile = open(os.path.join(root_dir,'data/language/cur.txt'), 'r', encoding='utf-8')
cur = curfile.read()
curfile.close()
if cur in cls.langs: cls.cur = cls.langs[cur]
@classmethod
def write(cls):
for key in cls.langs:
dic = cls.langs[key]
titles = sorted(dic.keys())
pkl_file = open(os.path.join(root_dir,'data/language/%s.dic'%key), 'w', encoding='utf-8')
for i in titles:
pkl_file.write('%s:%s\n'%(i,dic[i]))
pkl_file.close()
@classmethod
def add(cls, key=None):
if not key is None and not ':' in key:
if not key in cls.plgs:cls.plgs.append(key)
return
titles = cls.plgs
for key in cls.langs:
dic = cls.langs[key]
for i in titles:
if not ':' in i and not i in dic: dic[i] = '--'
cls.write()
@classmethod
def rm(cls):
titles = cls.plgs
for key in cls.langs:
dic = cls.langs[key]
for i in dic:
if not i in titles: del dic[i]
cls.write()
@classmethod
def newdic(cls, key):
cls.langs[key] = {}
for i in cls.plgs:
if not ':' in i: cls.langs[key][i] = '--'
@classmethod
def get(cls, key):
if not cls.cur is None and key in cls.cur:
if cls.cur[key]!='--':
return cls.cur[key]
return key
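# Dictionary file format (inferred from read()/write() above): each *.dic
# file holds one "source:translation" pair per line, e.g. "Open:打开".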
LanguageManager.read()
if __name__ == '__main__':
    # the original test block referenced ShotcutManager, which does not exist
    # in this module; exercise LanguageManager instead
    print(LanguageManager.langs)
|
[
"imagepy@sina.com"
] |
imagepy@sina.com
|
973212974c4e2f21d28c3c8e897643227ad9a0af
|
3a527d62947ad6d01ebfc8932958636270bc055a
|
/contact/validations.py
|
4b8254ea28a6c080b75e8528388aa8b575f5bce0
|
[] |
no_license
|
Iamprakashkhatri/contactcrud
|
5bc9209dc104914608c1c9f86604c7bfadbe0fa6
|
ffd2d18cccde5c7dd9c2dd67382564847c4f6eff
|
refs/heads/master
| 2020-09-03T20:03:00.759839
| 2019-11-06T05:08:32
| 2019-11-06T05:08:32
| 219,555,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_domainonly_email(value):
if not "your@domain.com" in value:
raise ValidationError(_("Sorry,the email submitted is invalid"))
return value
Blacklisted=['abc','new']
def validate_blacklisted(value):
if value in Blacklisted:
raise ValidationError(_("Sorry,the value is not valid."))
return value
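# Typical hookup (hypothetical): attach the validators to a model or form field:
# email = models.EmailField(validators=[validate_domainonly_email, validate_blacklisted])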
|
[
"prakash"
] |
prakash
|
4d767e75c1a3e17efffcd6541128012239313ac2
|
108fc2873b5c07e4ad9515adc16bc8e9fdf7d021
|
/smorest_sfs/utils/sqla.py
|
e650ad9a22a4a200e49a5264a8c8d62e89b89d0c
|
[
"Apache-2.0"
] |
permissive
|
ssfdust/yt-media
|
4ac5eba6a25830268f42b951e8307bb57e7baeeb
|
36c3d1977df5851d8df54846f0bc84be2b86e962
|
refs/heads/master
| 2021-08-08T09:40:31.241228
| 2020-05-11T03:11:20
| 2020-05-11T03:11:20
| 175,938,603
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
from typing import List, Optional
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Table, UniqueConstraint, inspect
from smorest_sfs.extensions.sqla import Model
RelateTableArgs = namedtuple(
"RelateTableArgs", ["tablename", "related_key", "the_ohter_related_key"]
)
class AttrHistory:
def __init__(self, added: Optional[List[Model]], deleted: Optional[List[Model]]):
self.added: List[Model] = added or []
self.deleted: List[Model] = deleted or []
def create_relation_table(db: SQLAlchemy, table_args: RelateTableArgs) -> Table:
return db.Table(
table_args.tablename,
db.Column(table_args.related_key, db.Integer(), nullable=False),
db.Column(table_args.the_other_related_key, db.Integer(), nullable=False),
UniqueConstraint(table_args.related_key, table_args.the_other_related_key),
)
def get_history(model: Model, attr: str) -> AttrHistory:
model_state = inspect(model)
attr_state = getattr(model_state.attrs, attr)
attr_hist = attr_state.history
if not attr_hist.has_changes():
raise ValueError("No changes found")
return AttrHistory(attr_hist.added, attr_hist.deleted)
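# Usage sketch (hypothetical): after mutating a relationship attribute but
# before flushing, inspect what changed:
# history = get_history(some_model, "tags")
# print(history.added, history.deleted)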
|
[
"ssfdust@gmail.com"
] |
ssfdust@gmail.com
|
f8311679b31b7b468a2b34ecffa528474554e3af
|
e3e5efe47f51f71b28fa6d7d5b2311f25d64b580
|
/analytics/migrations/0004_auto_20191002_1350.py
|
189d799a1f81daee8ef86dcdbb73ce505ba25707
|
[
"BSD-3-Clause"
] |
permissive
|
boxed/analytics
|
be9169584459434dd2b0099d8ff2ff0755086a95
|
b1237de9d13ee3509d524d06c561342071090331
|
refs/heads/master
| 2023-04-15T01:38:03.702986
| 2023-03-22T05:38:22
| 2023-03-22T05:38:22
| 210,958,980
| 1
| 0
|
BSD-3-Clause
| 2023-03-22T05:38:24
| 2019-09-25T23:30:59
|
Python
|
UTF-8
|
Python
| false
| false
| 529
|
py
|
# Generated by Django 2.2.5 on 2019-10-02 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analytics', '0003_auto_20190929_1620'),
]
operations = [
migrations.AlterField(
model_name='referrers',
name='page_url',
field=models.URLField(db_index=True),
),
migrations.AlterUniqueTogether(
name='referrers',
unique_together={('page_url', 'referrer')},
),
]
|
[
"boxed@killingar.net"
] |
boxed@killingar.net
|
3752bd2b4cb84fb188177ba0d85a16a73e44cd8c
|
8ea6b3429ec95420029a188e898cc63d6acac871
|
/fnb/views.py
|
44d1330b9653e6cb948596d5c33847e862a19beb
|
[] |
no_license
|
chidimo/fnb
|
131cbbcb44778fae607929e55515907bd15125ba
|
1ecbed6ce55f7ce368f8909975c2ba28e908d1c3
|
refs/heads/master
| 2023-04-10T03:37:40.886743
| 2021-04-23T18:04:26
| 2021-04-23T18:04:26
| 359,479,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# @login_required
def home(request):
template = "home.html"
context = {}
return render(request, template, context)
|
[
"orjichidi95@gmail.com"
] |
orjichidi95@gmail.com
|
6c2bbef21302fc2cebc465ed246a30dbf26672cb
|
a66460a46611483dfbdc94c7996893f427e60d97
|
/ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_datastore_cluster.py
|
4e635ef511387fe7f9fee40439d8fbc1498d0585
|
[
"MIT",
"GPL-3.0-or-later"
] |
permissive
|
otus-devops-2019-02/yyashkin_infra
|
06b57807dde26f94f501828c07503d6bf1d70816
|
0cd0c003884155ac922e3e301305ac202de7028c
|
refs/heads/master
| 2020-04-29T02:42:22.056724
| 2019-05-15T16:24:35
| 2019-05-15T16:24:35
| 175,780,718
| 0
| 0
|
MIT
| 2019-05-15T16:24:36
| 2019-03-15T08:37:35
|
HCL
|
UTF-8
|
Python
| false
| false
| 5,606
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_datastore_cluster
short_description: Manage VMware vSphere datastore clusters
description:
- This module can be used to add and delete datastore cluster in given VMware environment.
- All parameters and VMware object values are case sensitive.
version_added: 2.6
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.0, 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter.
required: True
datastore_cluster_name:
description:
- The name of the datastore cluster.
required: True
state:
description:
- If the datastore cluster should be present or absent.
choices: [ present, absent ]
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create datastore cluster
vmware_datastore_cluster:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
datastore_cluster_name: '{{ datastore_cluster_name }}'
state: present
delegate_to: localhost
- name: Delete datastore cluster
vmware_datastore_cluster:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
datastore_cluster_name: '{{ datastore_cluster_name }}'
state: absent
delegate_to: localhost
'''
RETURN = """
result:
description: information about datastore cluster operation
returned: always
type: string
sample: "Datastore cluster 'DSC2' created successfully."
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
from ansible.module_utils._text import to_native
class VMwareDatastoreClusterManager(PyVmomi):
def __init__(self, module):
super(VMwareDatastoreClusterManager, self).__init__(module)
datacenter_name = self.params.get('datacenter_name')
self.datacenter_obj = self.find_datacenter_by_name(datacenter_name)
if not self.datacenter_obj:
self.module.fail_json(msg="Failed to find datacenter '%s' required"
" for managing datastore cluster." % datacenter_name)
self.datastore_cluster_name = self.params.get('datastore_cluster_name')
self.datastore_cluster_obj = self.find_datastore_cluster_by_name(self.datastore_cluster_name)
def ensure(self):
"""
Function to manage internal state of datastore cluster
"""
results = dict(changed=False, result='')
state = self.module.params.get('state')
if self.datastore_cluster_obj:
if state == 'present':
results['result'] = "Datastore cluster '%s' already available." % self.datastore_cluster_name
elif state == 'absent':
# Delete datastore cluster
if not self.module.check_mode:
task = self.datastore_cluster_obj.Destroy_Task()
changed, result = wait_for_task(task)
else:
changed = True
if changed:
results['result'] = "Datastore cluster '%s' deleted successfully." % self.datastore_cluster_name
results['changed'] = changed
else:
self.module.fail_json(msg="Failed to delete datastore cluster '%s'." % self.datastore_cluster_name)
else:
if state == 'present':
# Create datastore cluster
if not self.module.check_mode:
try:
self.datacenter_obj.datastoreFolder.CreateStoragePod(name=self.datastore_cluster_name)
except Exception as generic_exc:
self.module.fail_json(msg="Failed to create datstore cluster"
" '%s' due to %s" % (self.datastore_cluster_name,
to_native(generic_exc)))
results['changed'] = True
results['result'] = "Datastore cluster '%s' created successfully." % self.datastore_cluster_name
elif state == 'absent':
results['result'] = "Datastore cluster '%s' not available or already deleted." % self.datastore_cluster_name
self.module.exit_json(**results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dict(
datacenter_name=dict(type='str', required=True),
datastore_cluster_name=dict(type='str', required=True),
state=dict(default='present', choices=['present', 'absent'], type='str'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
datastore_cluster_mgr = VMwareDatastoreClusterManager(module)
datastore_cluster_mgr.ensure()
if __name__ == '__main__':
main()
|
[
"theyashkins@gmail.com"
] |
theyashkins@gmail.com
|
e40ab14da91a94eef3ca5f0162f555fd7f3063e2
|
1f689e448d8b510ea6575590cb6920048b4e9aea
|
/leetcode/237_delete_node_in_a_linked_list.py
|
18f860fadd7f55ef02c8c54a766f4dd7909583d7
|
[] |
no_license
|
lijenpan/python
|
52c6061ff90c611efd039b1858339edbefdb5ad0
|
7f67045a83bd2592ccc399420194094fb78404b8
|
refs/heads/master
| 2020-05-30T10:53:15.634090
| 2016-12-02T20:50:28
| 2016-12-02T20:50:28
| 7,646,477
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
"""
Write a function to delete a node (except the tail) in a singly linked list, given only access to that node.
Suppose the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node with value 3; the linked list
should become 1 -> 2 -> 4 after calling your function.
==============================
I was confused about what the question was asking without giving the root. Then I realized the question
was simply asking to point the given node to the next one, thus deleting the given node. Simple.
"""
def deleteNode(node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
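# Minimal sanity check (not part of the original solution): build
# 1 -> 2 -> 3 -> 4, delete the node holding 3, and confirm 1 -> 2 -> 4.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None
if __name__ == "__main__":
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(4)
    deleteNode(head.next.next)  # the node with value 3
    node, vals = head, []
    while node:
        vals.append(node.val)
        node = node.next
    print(vals)  # expected: [1, 2, 4]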
|
[
"noreply@github.com"
] |
lijenpan.noreply@github.com
|
6b1995c24faa13aa921aa46b8fc645f211e7c15b
|
350796fa13c98af7a5a2e8873e5cb74e4f29043c
|
/redis_test/__init__.py
|
eb36980e51a8b42aa8c379c220023807fec5d382
|
[] |
no_license
|
dajun928/Python36
|
3babdb47124cace844bf8d7b8054c1c6181a0a9f
|
8974fc9c9c808e10fef02ed4c061bfbac5a0961f
|
refs/heads/master
| 2022-12-12T17:47:47.724277
| 2021-01-10T03:29:45
| 2021-01-10T03:29:45
| 174,575,956
| 0
| 0
| null | 2021-06-01T23:57:50
| 2019-03-08T16:59:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@version :
@file : __init__.py
@time : 2019/07/13 23:14:22
@func :
"""
import platform
print(platform.python_version())
|
[
"1663177102@qq.com"
] |
1663177102@qq.com
|
1b747ea8a43a236bb90f881c0f94541e88f10226
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/time/great_company/person/think_thing/large_person.py
|
404255285e129d19a24dfea618205b314abd8f64
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
#! /usr/bin/env python
def different_point_and_important_person(str_arg):
own_life(str_arg)
print('small_government')
def own_life(str_arg):
print(str_arg)
if __name__ == '__main__':
different_point_and_important_person('tell_high_fact_about_next_government')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
773f91abb27727fe00fca57a9a0057b794f7b0a2
|
26f6313772161851b3b28b32a4f8d255499b3974
|
/Python/HowManyApplesCanYouPutintotheBasket.py
|
b7e35d3681a27ba6f75f3b9c535237f76e621a1e
|
[] |
no_license
|
here0009/LeetCode
|
693e634a3096d929e5c842c5c5b989fa388e0fcd
|
f96a2273c6831a8035e1adacfa452f73c599ae16
|
refs/heads/master
| 2023-06-30T19:07:23.645941
| 2021-07-31T03:38:51
| 2021-07-31T03:38:51
| 266,287,834
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
"""
You have some apples, where arr[i] is the weight of the i-th apple. You also have a basket that can carry up to 5000 units of weight.
Return the maximum number of apples you can put in the basket.
Example 1:
Input: arr = [100,200,150,1000]
Output: 4
Explanation: All 4 apples can be carried by the basket since their sum of weights is 1450.
Example 2:
Input: arr = [900,950,800,1000,700,800]
Output: 5
Explanation: The sum of weights of the 6 apples exceeds 5000 so we choose any 5 of them.
Constraints:
1 <= arr.length <= 10^3
1 <= arr[i] <= 10^3
"""
class Solution:
def maxNumberOfApples(self, arr):
capacity = 5000
arr = sorted(arr)
counts = 0
weights = 0
for n in arr:
weights += n
if weights > capacity:
break
else:
counts += 1
return counts
s = Solution()
arr = [100,200,150,1000]
print(s.maxNumberOfApples(arr))
arr = [900,950,800,1000,700,800]
print(s.maxNumberOfApples(arr))
|
[
"here0009@163.com"
] |
here0009@163.com
|
87ba885929017189ab742b8e8999ce8d820bb5f2
|
987ead1eb0877b9bdea16f3ee50bf19d5fe204bd
|
/matplotlib/fig_axes_customize_simple.py
|
86517664a5dd13b23da4617281156274fa684c85
|
[] |
no_license
|
ZHX1996/project
|
da62151e32254848a02292a2f9bdb1db17850d67
|
5a57be55cf173dde7e5a135a9cf1cfbc9a63a158
|
refs/heads/master
| 2021-07-15T11:36:02.412231
| 2020-05-15T08:51:34
| 2020-05-15T08:51:34
| 94,512,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
rect = fig.patch
rect.set_facecolor('lightgoldenrodyellow')
ax1 = fig.add_axes([0.1, 0.3, 0.4, 0.4])
rect = ax1.patch
rect.set_facecolor('lightslategray')
for label in ax1.xaxis.get_ticklabels():
label.set_color('red')
label.set_rotation(45)
label.set_fontsize(16)
for line in ax1.yaxis.get_ticklines():
line.set_color('green')
line.set_markersize(25)
line.set_markeredgewidth(3)
plt.show()
|
[
"1365370292@qq.com"
] |
1365370292@qq.com
|
e6979701b78027a1810a73e1e1afa3b9d8e5a65b
|
d6a209a45bb14846e47b07a77641de26e073e9fb
|
/drill14.py
|
a5fe48baad84fb34879066dbc046251dce45ee47
|
[] |
no_license
|
fan-bingbing/pyexcercise
|
ca711212af0f5df07a57d253190f63cf4a0dd887
|
ddb32bfae1891cda9f0ef0effd43a95a95e1d043
|
refs/heads/master
| 2021-03-13T19:49:24.042399
| 2020-03-17T04:24:21
| 2020-03-17T04:24:21
| 246,705,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
from sys import argv
script, user_name = argv
# don't forget how to run this kind of file, pass argv in command line.
prompt = '>'
# a handy way of changing prompt icon
print(f"Hi {user_name}, I'm the {script} script.")
print("I'd like to ask you a few questions.")
print(f"Do you like me {user_name}?")
likes = input(prompt)
print(f"where do you live {user_name}?")
lives = input(prompt)
print("What kind of computer do you have?")
computer = input(prompt)
print(f"""
Alright, so you said {likes} about liking me.
You live in {lives}. Not sure where that is.
And you have a {computer} computer. Nice.
""")
|
[
"fanyu1980@hotmail.com"
] |
fanyu1980@hotmail.com
|
c2bb5f0ed30a64641673adc9923cb1f29d84b06d
|
c30906c50ea0fbcccbf080b89eca84edb9f04673
|
/DaVinci_scripts/MC/twoBody/KK/job_2016_down.py
|
766fe4d29a71ab572fa7135bf3f63551a126e3ce
|
[] |
no_license
|
hpullen/DPhil_B02DKstar_analysis
|
543661c4c2e978fb7f60a1d81f27bc660710994d
|
651b3f333d3959e78512fc294afa334e3ea26fd9
|
refs/heads/master
| 2023-07-15T17:38:53.009366
| 2021-08-25T19:40:42
| 2021-08-25T19:40:42
| 107,555,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
j = Job(name='KK_2016_down',
backend=Dirac(),
application=DaVinci(version = "v41r3"),
splitter=SplitByFiles(filesPerJob = 10, maxFiles = -1)
)
j.application.optsfile = '2016_KK.py'
BK = BKQuery(path=('//MC/2016/Beam6500GeV-2016-MagDown-Nu1.6-25ns-Pythia8/Sim09b/'
'Trig0x6138160F/Reco16/Turbo03/Stripping26NoPrescalingFlagged/'
'11164021/ALLSTREAMS.MDST'))
j.inputdata = BK.getDataset()
j.comment = '2016 MC KK down'
j.submit()
|
[
"hannah.pullen@physics.ox.ac.uk"
] |
hannah.pullen@physics.ox.ac.uk
|
8a0ce920566673a8897bd95bdffab20b9ca62d2e
|
5496b9682dec06925f3572e64d7f1eb48d78ebe1
|
/src/visualization/FragmentationKaandorpPartial/FragmentationKaandorpPartial_timeseries.py
|
e0c5e0fe4cbbd37de19ad52a5dd362f293e1991e
|
[] |
no_license
|
VictorOnink/Lagrangian-Transport-Scenarios
|
64bec8b992e2909a05b0258524dbae25f967ea29
|
586bcecc42d6a7f4f299507da8f0cb29c8d71a2e
|
refs/heads/master
| 2023-04-14T12:22:29.309172
| 2022-07-11T18:46:38
| 2022-07-11T18:46:38
| 297,894,637
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,986
|
py
|
import settings
import utils
import visualization.visualization_utils as vUtils
import matplotlib.pyplot as plt
import string
from datetime import datetime, timedelta
class FragmentationKaandorpPartial_timeseries:
def __init__(self, scenario, figure_direc, shore_time, lambda_frag, rho, simulation_length, weight,
input='LebretonDivision'):
# Simulation parameters
self.scenario = scenario
self.shore_time = shore_time
self.lambda_frag = lambda_frag
self.rho = rho
self.simulation_length = simulation_length
self.class_num = settings.SIZE_CLASS_NUMBER
self.weight = weight
self.input = input
# Data parameters
self.output_direc = figure_direc + 'timeseries/'
self.data_direc = settings.DATA_OUTPUT_DIREC + 'timeseries/FragmentationKaandorpPartial/'
utils.check_direc_exist(self.output_direc)
self.prefix = 'timeseries'
self.beach_state_list = ['beach', 'adrift', 'total']
# Figure parameters
self.figure_size = (20, 14)
self.figure_shape = (self.beach_state_list.__len__(), 1)
self.ax_label_size = 16
self.ax_ticklabel_size = 14
self.legend_size = 14
self.y_label = 'Mass'
self.x_label = 'Time'
self.xmin, self.xmax = datetime(settings.STARTYEAR, 1, 1), \
datetime(settings.STARTYEAR + self.simulation_length, 1, 1)
self.ymin, self.ymax = 1e-3, 6500
self.ax_range = self.xmax, self.xmin, self.ymax, self.ymin
self.number_of_plots = self.beach_state_list.__len__()
def plot(self):
# Loading data
timeseries_dict = {}
for size_class in range(self.class_num):
timeseries_dict[size_class] = {}
data_dict = vUtils.FragmentationKaandorpPartial_load_data(scenario=self.scenario, prefix=self.prefix,
data_direc=self.data_direc,
shore_time=self.shore_time,
lambda_frag=self.lambda_frag, rho=self.rho,
postprocess=True, input=self.input)
for beach_state in self.beach_state_list:
timeseries_dict[size_class][beach_state] = data_dict[beach_state][size_class][self.weight]
# creating a time axis
time_list = []
for time in data_dict['time']:
time_list.append(datetime(settings.STARTYEAR, 1, 1, 12) + timedelta(seconds=time))
# Creating figure
ax = vUtils.base_figure(fig_size=self.figure_size, ax_range=self.ax_range, y_label=self.y_label,
x_label=self.x_label, ax_label_size=self.ax_label_size,
ax_ticklabel_size=self.ax_ticklabel_size, shape=self.figure_shape,
plot_num=self.number_of_plots, legend_axis=True, log_yscale=True, x_time_axis=True,
width_ratios=[1, 0.3], all_x_labels=True)
# Setting the subfigure titles
for ax_index in range(self.number_of_plots):
ax[ax_index].set_title(subfigure_title(ax_index, self.beach_state_list[ax_index]),
fontsize=self.ax_label_size)
# Creating a legend
size_colors = [plt.plot([], [], c=vUtils.discrete_color_from_cmap(sc, subdivisions=self.class_num),
label=size_label(sc), linestyle='-')[0] for sc in range(self.class_num)]
ax[-1].legend(handles=size_colors, fontsize=self.legend_size, loc='upper right')
# Plotting the various fractions
for size_class in range(self.class_num):
for beach_index, beach_state in enumerate(self.beach_state_list):
color_size = vUtils.discrete_color_from_cmap(size_class, subdivisions=self.class_num)
ax[beach_index].plot(time_list, timeseries_dict[size_class][beach_state], linestyle='-',
c=color_size)
# Saving the figure
plt.savefig(file_name(self.output_direc, self.shore_time, self.lambda_frag, self.input), bbox_inches='tight')
def file_name(output_direc, shore_time, lambda_frag, input):
str_format = input, shore_time, lambda_frag
return output_direc + 'FragmentationKaandorpPartial_beach_state_timeseries-{}_ST={}_lamf={}.png'.format(*str_format)
def subfigure_title(index, beach_state):
"""
setting the title of the subfigure
:param index:
:return:
"""
alphabet = string.ascii_lowercase
return '({}) {}'.format(alphabet[index], beach_state)
def size_label(size_class):
particle_size = settings.INIT_SIZE * settings.P_FRAG ** size_class
return 'Size class {}, d = {:.2f} mm'.format(size_class, particle_size * 1e3)
|
[
"31734765+VictorOnink@users.noreply.github.com"
] |
31734765+VictorOnink@users.noreply.github.com
|
e8c85a198d85379bb175f16c9e3150e47000350b
|
f03e771eb4c1f300ae819179090efc388bcc6d32
|
/src/pymine/tile/Tile.py
|
3547ab54aab3f2536690989bfb2649112dcd016b
|
[] |
no_license
|
lacthan28/PyMine
|
d8d2365b0aabefcb056754260f67095dbcbe62ff
|
e7d4778f01181d45551c02fa0cef151327fa240a
|
refs/heads/master
| 2021-01-21T19:50:48.417635
| 2017-06-30T05:38:46
| 2017-06-30T05:38:46
| 92,161,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
# -*- coding: utf-8 -*-
import inspect
from abc import *
from pymine.event.Timings import Timings
from pymine.level.Level import Level
from pymine.level.Position import Position
from pymine.nbt.tag.CompoundTag import CompoundTag
from spl.stubs.Core import isset, microtime
class Tile(Position, metaclass=ABCMeta):
"""
:param Chunk
"""
BREWING_STAND = "BrewingStand"
CHEST = "Chest"
ENCHANT_TABLE = "EnchantTable"
FLOWER_POT = "FlowerPot"
FURNACE = "Furnace"
ITEM_FRAME = "ItemFrame"
MOB_SPAWNER = "MobSpawner"
SIGN = "Sign"
SKULL = "Skull"
DISPENSER = "Dispenser"
DROPPER = "Dropper"
CAULDRON = "Cauldron"
HOPPER = "Hopper"
BEACON = "Beacon"
ENDER_CHEST = "EnderChest"
titleCount = 1
knownTiles = { }
shortNames = { }
chunk = None
name = None
id = None
attach = None
metadata = None
closed = False
namedtag = None
lastUpdate = None
server = None
timings = None
tickTimer = None
def init(self):
Tile.registerTile(Beacon)
Tile.registerTile(Chest)
Tile.registerTile(EnchantTable)
Tile.registerTile(FlowerPot)
Tile.registerTile(Furnace)
Tile.registerTile(ItemFrame)
Tile.registerTile(Sign)
Tile.registerTile(Skull)
Tile.registerTile(Cauldron)
Tile.registerTile(Hopper)
Tile.registerTile(EnderChest)
@staticmethod
def createTile(type, level: Level, nbt: CompoundTag, *args):
"""
:param str type:
:param Level level:
:param CompoundTag nbt:
:param args:
:rtype: Tile
:return:
"""
        if type in Tile.knownTiles:  # indexing first would raise KeyError before isset() could run
            cls = Tile.knownTiles[type]
            return cls(level, nbt, *args)
        return None
    @staticmethod
    def registerTile(cls):
        """
        Register a concrete Tile subclass so createTile() can find it by name.
        :rtype: bool
        :return:
        """
        # the original instantiated cls() (which needs level/nbt) and tested
        # isinstance(cls, Tile) on the class object; issubclass is what was meant
        if issubclass(cls, Tile) and not inspect.isabstract(cls):
            Tile.knownTiles[cls.__name__] = cls
            Tile.shortNames[cls] = cls.__name__
            return True
        return False
def getSaveId(self):
return Tile.shortNames[__class__]
def __init__(self, level: Level, nbt: CompoundTag):
self.timings = Timings.getTileEntityTimings(self)
self.namedtag = nbt
self.server = level.getServer()
self.setLevel(level)
self.chunk = level.getChunk(self.namedtag['x'] >> 4, self.namedtag['z'] >> 4, False)
assert self.chunk is not None
self.name = ""
self.lastUpdate = microtime(True)
        Tile.titleCount += 1  # increment the counter so each tile gets a unique id
        self.id = Tile.titleCount
self.x = int(self.namedtag['x'])
self.y = int(self.namedtag['y'])
self.z = int(self.namedtag['z'])
self.chunk.addTile(self)
self.getLevel().addTile(self)
self.tickTimer = Timings.getTileEntityTimings(self)
def getId(self):
return self.id
def saveNBT(self):
        self.namedtag.id = StringTag("id", self.getSaveId())  # assumes StringTag(name, value), as in the PocketMine original
|
[
"lacthan28@gmail.com"
] |
lacthan28@gmail.com
|
bb76168acd060f61720726e831f2a0a94fe00c53
|
260499100ef43361cbf2815e2c0eb5288755862c
|
/Intro/07_almostIncreasingSequence.py
|
873528d2d7f438a47ea824f35b3e78211ab9d83a
|
[] |
no_license
|
Cheng0639/CodeFights_Python
|
c4640a3cfb6be89049fd3d0d04702b06071b4e39
|
6b536b851016510ee79359e33da662de21cb3d3a
|
refs/heads/master
| 2021-05-09T09:54:57.762593
| 2018-07-24T14:53:15
| 2018-07-24T14:53:15
| 109,243,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
def almostIncreasingSequence(sequence):
count_decreasing_sq = 0
for i in range(len(sequence) - 1):
if sequence[i + 1] <= sequence[i]:
count_decreasing_sq += 1
if (i >= 1) and (sequence[i + 1] <= sequence[i - 1]):
if (len(sequence) - 2 > i) and (sequence[i + 2] <= sequence[i]):
count_decreasing_sq += 1
if count_decreasing_sq > 1:
return False
return True
print(almostIncreasingSequence([1, 3, 2, 1]) == False)
print(almostIncreasingSequence([1, 3, 2]) == True)
print(almostIncreasingSequence([1, 2, 1, 2]) == False)
print(almostIncreasingSequence([1, 4, 10, 4, 2]) == False)
|
[
"dongdon0639@gmail.com"
] |
dongdon0639@gmail.com
|
8c754a532e50d2046cfeb41f5c5df7af538ee122
|
f854ef28002a3931a8d8b8d0b9cc691b8a449db3
|
/home-assistant/custom_components/browser_mod/binary_sensor.py
|
16333806a3baa1bbffe7e5797049573fb21ccf12
|
[
"MIT"
] |
permissive
|
Burningstone91/smart-home-setup
|
030cdaa13d05fb19a82b28ea455614d3276522ab
|
c2f34cc8b8243bc6ce620b3f03e3e44ff28150ca
|
refs/heads/master
| 2023-02-23T06:25:04.476657
| 2022-02-26T16:05:02
| 2022-02-26T16:05:02
| 239,319,680
| 421
| 36
|
MIT
| 2023-02-08T01:16:54
| 2020-02-09T14:39:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
from datetime import datetime
from homeassistant.const import (
STATE_UNAVAILABLE,
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
STATE_ON,
STATE_OFF,
)
from homeassistant.components.binary_sensor import DEVICE_CLASS_MOTION
from .helpers import setup_platform, BrowserModEntity
PLATFORM = "binary_sensor"
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
return setup_platform(hass, config, async_add_devices, PLATFORM, BrowserModSensor)
async def async_setup_entry(hass, config_entry, async_add_entities):
await async_setup_platform(hass, {}, async_add_entities)
class BrowserModSensor(BrowserModEntity):
domain = PLATFORM
def __init__(self, hass, connection, deviceID, alias=None):
super().__init__(hass, connection, deviceID, alias)
self.last_seen = None
def updated(self):
self.last_seen = datetime.now()
self.schedule_update_ha_state()
@property
def state(self):
if not self.connection.connection:
return STATE_UNAVAILABLE
if self.data.get("motion", False):
return STATE_ON
return STATE_OFF
    @property
    def is_on(self):
        # mirror state(): the sensor is "on" when motion is reported
        # (the original returned the negation, contradicting state())
        return self.data.get("motion", False)
@property
def device_class(self):
return DEVICE_CLASS_MOTION
@property
def device_state_attributes(self):
return {
"type": "browser_mod",
"last_seen": self.last_seen,
ATTR_BATTERY_LEVEL: self.data.get("battery", None),
ATTR_BATTERY_CHARGING: self.data.get("charging", None),
**self.data,
}
|
[
"dimitri.steiner.gl@gmail.com"
] |
dimitri.steiner.gl@gmail.com
|
9d338275db0bb2d0043d091a349c50f9ffa5ffa9
|
e9c11f173507a06b40523714591cf1c443efcd89
|
/autoencode_project/vae.py
|
aea6bebb4650219215bdaca9cac090ed3b326ee3
|
[] |
no_license
|
IanEisenberg/CBMM
|
55100773b157981122aa261d70186c42ca04b685
|
6a1e2eda7308b1334187036ef37983b940b5d186
|
refs/heads/master
| 2021-01-19T07:40:30.735971
| 2017-08-31T01:16:32
| 2017-08-31T01:16:32
| 100,642,436
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,793
|
py
|
from keras.layers import Input, Dense, Lambda, Layer
from keras.models import Model
from keras import backend as K
from keras import metrics
import numpy as np
from os import path
import pandas as pd
from scipy.stats import norm
import seaborn as sns
from sklearn.preprocessing import scale
# load data
data_loc = path.join('Data', 'behavioral_data.csv')
data_df = pd.read_csv(data_loc, index_col=0)
data = data_df.values
n_held_out = data.shape[0] // 6
data_train = scale(data[n_held_out:, :])
data_held_out = scale(data[:n_held_out, :])
# VAE
batch_size = data_train.shape[0]//5
original_dim = data_train.shape[1]
latent_dim = 4
intermediate_dim = 50
epochs = 1000
epsilon_std = 1.0
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)
def sampling(args):
z_mean, z_log_sigma = args
epsilon = K.random_normal(shape=(batch_size, latent_dim),
mean=0., stddev=epsilon_std)
return z_mean + K.exp(z_log_sigma) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_sigma])`
z = Lambda(sampling)([z_mean, z_log_sigma])
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)
# end-to-end autoencoder
vae = Model(x, x_decoded_mean)
# encoder, from inputs to latent space
encoder = Model(x, z_mean)
# generator, from latent space to reconstructed inputs
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)
def vae_loss(x, x_decoded_mean):
xent_loss = metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
return xent_loss + kl_loss
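# (added note) vae_loss is the usual negative ELBO: the reconstruction
# cross-entropy plus the analytic KL divergence between the approximate
# posterior q(z|x) = N(z_mean, exp(z_log_sigma)^2) and the N(0, I) prior.
# Many published versions scale the reconstruction term by original_dim.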
vae.compile(optimizer='rmsprop', loss=vae_loss)
out = vae.fit(data_train, data_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(data_held_out, data_held_out))
from matplotlib import pyplot as plt
from itertools import product
x_test_encoded = encoder.predict(data, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1])
n=8
grid=product(*[norm.ppf(np.linspace(0.05, 0.95, n))
for _ in range(latent_dim)])
samples = []
for loc in grid:
z_sample = np.array([loc])
x_decoded = generator.predict(z_sample)
samples.append(x_decoded)
samples = np.vstack(samples)
sns.heatmap(np.corrcoef(samples.T))
|
[
"ianeisenberg90@gmail.com"
] |
ianeisenberg90@gmail.com
|
3c1cb2eabb7a69bd4c6859d78945c5be3c53996c
|
15b71fe940708d3c04581bfb012c8d61705d6108
|
/compiler/src/thryft/compiler/parse_exception.py
|
18fec9277995932a528ee0f15e5e57fdd1b8d97c
|
[
"BSD-2-Clause"
] |
permissive
|
adam-singer/thryft
|
2f426c1751c02f95c4785f499065b28b853df42d
|
26cfd2148fa408aa5da8ac93bbe7b8722a0dfd8e
|
refs/heads/master
| 2021-05-27T00:42:19.358530
| 2014-07-27T19:09:34
| 2014-07-27T19:09:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
#-------------------------------------------------------------------------------
# Copyright (c) 2013, Minor Gordon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#-------------------------------------------------------------------------------
from thryft.compiler.token import Token
class ParseException(Exception):
def __init__(self, message, token=None):
if token is not None:
assert isinstance(token, Token)
message = "%s at %s" % (message, repr(token))
Exception.__init__(self, message)
self.__token = token
@property
def token(self):
return self.__token
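# (added) Illustrative usage, assuming a Token instance `tok` from the lexer:
#   raise ParseException("unexpected identifier", token=tok)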
|
[
"github@minorgordon.net"
] |
github@minorgordon.net
|
78851915cd48b3fa3a3b346632c40c8be0e4232d
|
51f1a5f544fd00c7449edeb28538dd99d4b5a1c2
|
/spyderpro/test/insert_scenece_history_data.py
|
5e97225b102be51df53dd91c6f3390ee4c72f6f0
|
[] |
no_license
|
LianZS/spyderpro
|
aab6f7f5c88c87f683f6cdacd19629d11da74009
|
5e34873cd13950dd3b5dc6341aad144522af0eae
|
refs/heads/master
| 2020-05-31T10:38:30.143573
| 2020-02-16T02:43:43
| 2020-02-16T02:43:43
| 190,239,345
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
import csv
import os
import pymysql
from threading import Thread, Semaphore
user = 'root'
password = 'lzs87724158'
host = "localhost"
port = 3306
scencefilepath = os.getcwd()
city_file_path = os.getcwd()
database = 'digitalsmart'
db = pymysql.connect(host=host, user=user, password=password, database=database,
port=port)
cur = db.cursor()
sql = "select pid,area ,table_id from digitalsmart.tablemanager "
cur.execute(sql)
area_map = dict()
for pid, area, table_id in cur.fetchall():
area_map[area] = {"pid": pid, "table_id": table_id}
rootpath = os.path.abspath(os.path.curdir)
dirpath = "/Volumes/Tigo/易班项目数据/景区客流数据/"
sql_file = open("./flowdata.sql", 'a+')
for file in os.listdir(dirpath):
area = file.split(".")[0]
print(file)
try:
id_data = area_map[area]
except KeyError:
continue
pid = id_data['pid']
table_id = id_data['table_id']
filepath = dirpath + file
f = open(filepath, 'r')
r = csv.reader(f)
    # all ten history tables share the same schema; pick the table by its id
    sql_format = "insert into digitalsmart.historyscenceflow{} (pid, ddate, ttime, num) VALUES ".format(table_id)
    sql_file.write(sql_format)
item = r.__next__()
date_time = item[0] # 2018-04-08 19:05:00
ddate = int(date_time.split(" ")[0].replace("-", ''))
ttime = date_time.split(" ")[1]
num = int(item[1])
value = "(%d,%d,'%s',%d)" % (pid, ddate, ttime, num)
sql_file.write(value)
for item in r:
try:
date_time = item[0] # 2018-04-08 19:05:00
ddate = int(date_time.split(" ")[0].replace("-", ''))
ttime = date_time.split(" ")[1]
num = int(item[1])
except Exception:
print(item)
continue
value = "(%d,%d,'%s',%d)" % (pid, ddate, ttime, num)
            sql_file.write(",")
            sql_file.write(value)
        # terminate this file's INSERT before moving to the next file (the
        # original `break` stopped after the first file, apparently a debugging leftover)
        sql_file.write(";\n")
        f.close()
    sql_file.close()
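# (added note) A sketch of an alternative that skips the intermediate .sql file
# and bulk-inserts with parameterized queries (same table-by-id scheme assumed):
#   rows = [(pid, ddate, ttime, num), ...]
#   cur.executemany(
#       "insert into digitalsmart.historyscenceflow{} (pid, ddate, ttime, num) "
#       "values (%s, %s, %s, %s)".format(table_id), rows)
#   db.commit()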
|
[
"2214057556@qq.com"
] |
2214057556@qq.com
|
ff9fc9c3b9bbd3652a26b7aa1af092f08760aecf
|
4a83925d00f26b57db9a77553fbacf30735d1171
|
/open/core/betterself/tests/views/test_measurement_views.py
|
9bc05ae6117eed20260d67dd63e7475525afa9ea
|
[
"MIT"
] |
permissive
|
mmangione/open
|
581f622a6c2fe465fc7bd5cd0dc43a2a5f098248
|
5163e47ea6ba6160bf12a3ebe18bc76d078ea62c
|
refs/heads/master
| 2021-07-15T09:34:14.355559
| 2020-10-14T09:08:14
| 2020-10-14T09:08:14
| 217,483,795
| 0
| 0
|
MIT
| 2020-10-14T09:08:16
| 2019-10-25T08:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 945
|
py
|
from django.contrib.auth import get_user_model
from test_plus import TestCase
from open.core.betterself.constants import BetterSelfResourceConstants
from open.core.betterself.factories import MeasurementFactory
from open.core.betterself.models.measurement import Measurement
from open.core.betterself.tests.mixins.resource_mixin import (
BetterSelfResourceViewTestCaseMixin,
)
User = get_user_model()
"""
python manage.py test --pattern="*test_measurement_views.py" --keepdb
"""
class TestMeasurementView(BetterSelfResourceViewTestCaseMixin, TestCase):
url_name = BetterSelfResourceConstants.MEASUREMENTS
model_class_factory = MeasurementFactory
model_class = Measurement
def test_view(self):
data = self.client_1.get(self.url).data
self.assertEqual(len(data), 5)
def test_no_access_view(self):
"""
Doesn't apply here, measurements are available for all.
"""
return
|
[
"jeffshek@gmail.com"
] |
jeffshek@gmail.com
|
919842e59adaa424c81909c1e5e0a91ed3666ca8
|
a6bf211d2b31c2d7fdb927924d77ac00f1fb4d5f
|
/scripts/chart-total-checks-all.py
|
ad25b40f96661a760cd0c1901cb00d14c3cd0c9a
|
[
"MIT"
] |
permissive
|
tzor1234/nics-firearm-background-checks
|
f96770c4d7e4ac6eed16ad2dff94717f51c8611c
|
9e752e891124de16c2fd911050f853498c577260
|
refs/heads/master
| 2023-09-05T21:42:49.096106
| 2021-11-01T19:25:43
| 2021-11-01T19:25:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
#!/usr/bin/env python
import sys, os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
import seaborn as sb
sb.set()
checks = (
pd.read_csv(sys.stdin)
.assign(
month_dt = lambda df: pd.to_datetime(df["month"], format = "%Y-%m")
)
)
totals = checks.groupby("month_dt")["totals"].sum()
ax = totals.plot(kind="area", figsize=(12, 8), color="#000000", alpha=0.5)
ax.figure.set_facecolor("#FFFFFF")
ax.set_title(
"Monthly NICS Background Check Totals Since Nov. 1998",
fontsize=24
)
plt.setp(ax.get_yticklabels(), fontsize=12)
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
ax.set_xlabel("")
plt.savefig(sys.stdout.buffer)
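# (added note) Example invocation; the script reads the monthly CSV from stdin
# and writes the PNG to stdout (the CSV path here is an assumption):
#   python chart-total-checks-all.py < data/nics-firearm-background-checks.csv > total-checks.png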
|
[
"jsvine@gmail.com"
] |
jsvine@gmail.com
|
29800f47bd874f8ef18582517b30b1a222c6d4f7
|
aa4aa51465d79e0447cbe22281f0402ca95bdaa2
|
/python/project/gibbs.py
|
6332c91e98afb6236718f6b588e65561703cc60e
|
[] |
no_license
|
zuozuo12/usualProject
|
2ca06bb7a1ff6f99343f1997053ba8d5a48e00a7
|
335bcef5d76d6cf0c84dd3209176089b3b07fbba
|
refs/heads/master
| 2020-11-27T17:02:33.252884
| 2019-10-22T06:46:32
| 2019-10-22T06:46:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
from scipy.stats import norm
import matplotlib.pyplot as plt


def gibbs(N=500, thin=10):
    pi = []
    x = 0
    y = 0
    for i in range(N):
        for j in range(thin):
            x = norm.rvs(loc=y, scale=2, size=1, random_state=None)
            y = norm.rvs(loc=x, scale=3, size=1, random_state=None)
        pi.append(x[0])
    print(pi)
    return pi


pi = gibbs()
# `normed` was removed from matplotlib; `density=True` is the current spelling
plt.hist(pi, 100, density=True, facecolor='red', alpha=0.7, label='Samples Distribution')
plt.show()
# import random, math
#
#
# def gibbs(N=50000, thin=1000):
#     x = 0
#     y = 0
#     print("Iter x y")
#     for i in range(N):
#         for j in range(thin):
#             x = random.gammavariate(3, 1.0 / (y * y + 4))
#             y = random.gauss(1.0 / (x + 1), 1.0 / math.sqrt(2 * x + 2))
#         print(i, x, y)
#
#
# gibbs()
|
[
"llfwyyx@163.com"
] |
llfwyyx@163.com
|
9430c9ef1326c8401d3de54c76f50e4741fbcd27
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_083/ch11_2020_03_18_03_05_56_266683.py
|
daa8a1dbf71d778dd8294f4e1e22a98e871fce67
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
import math
def distancia_euclidiana(x1, x2, y1, y2):
    # ** binds tighter than /, so the original `**1/2` halved the sum
    # instead of taking its square root; use **0.5 (or math.sqrt)
    return (((x2 - x1) ** 2) + ((y2 - y1) ** 2)) ** 0.5
a=2
b=3
c=4
d=5
e=((3-2)**2) + (5-4)**2
raiz= math.sqrt(e)
print(raiz)
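# (added) The same distance via the fixed function:
print(distancia_euclidiana(2, 3, 4, 5))  # -> 1.4142135623730951, matching raiz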
|
[
"you@example.com"
] |
you@example.com
|
327458930aa320ef01199a11641fa16213635e1d
|
f1fcd165cd8444310ce5d201e481e3982dc28110
|
/medium/1901/190127/jang.py
|
c9913b842429b52e94d07f0ea82989fad994fc40
|
[] |
no_license
|
JoosJuliet/algoStudy
|
310a71a0fcc8f3c23281544cf3458ed999040176
|
3fc1e850f9d8b9f290f41fddd59ff403fbfffa05
|
refs/heads/master
| 2020-04-20T19:26:25.485875
| 2019-03-27T22:37:27
| 2019-03-27T22:37:27
| 169,049,593
| 1
| 0
| null | 2019-02-04T08:43:07
| 2019-02-04T08:43:07
| null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
n = int(input())
arr = list(map(int, input().split()))
enum_arr = sorted(enumerate(arr, start=1), key=lambda t: t[1])
frm = []
to = []
for i in range(1, len(enum_arr)+1):
if enum_arr[i-1][0] != i:
frm.append(i)
to.append(enum_arr[i-1][0])
if len(frm) == 0:
    print("yes")
else:
    # build cvted only when frm is non-empty; otherwise frm[0] raises IndexError
    # on an already-sorted input
    cvted = list(range(frm[0], frm[-1] + 1))
    if len(cvted) % 2 == 1:
        cvted.pop(len(cvted) // 2)
    if len(frm) == 2 and frm == to[::-1]:
        print("yes")
        print("swap", *frm)
    elif frm == to[::-1] and frm == cvted:
        print("yes")
        print("reverse", frm[0], frm[-1])
    else:
        print("no")
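# (added) Examples: input "1 2 3" -> "yes" (already sorted);
# input "1 3 2" -> "yes" / "swap 2 3".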
|
[
"wkdtjsgur100@naver.com"
] |
wkdtjsgur100@naver.com
|
4d51c21e128583698658ed9af9d417243d4275fe
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/raw_scripts/132.230.102.123-10.21.11.29/1569578047.py
|
23acb9e0de056e6062a2453e8fecf4cce4d3df5d
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,831
|
py
|
import functools
import typing
import string
import random
import pytest
# Solution part 1.
def list_filter(x: int, xs: list) -> list:
    """
    x: int
    xs: list
    Returns the values of the list that are less than or equal to x."""
    L = []
    for i in xs:
        if i <= x:
            L.append(i)
    return L
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(x, xs):
nonlocal covered, count
if xs == []:
covered.add(0)
if len (xs) == 1:
covered.add(1)
if len (xs) > 1:
covered.add(2)
if x in xs:
covered.add(3)
if len ([y for y in xs if y < x]):
covered.add(4)
if len ([y for y in xs if y > x]):
covered.add(5)
r = func (x, xs)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
if func.__doc__:
wrapper.__doc__ = func.__doc__
wrapper.__hints__ = typing.get_type_hints (func)
return wrapper
return coverage
coverage = mk_coverage()
try:
list_filter = coverage(list_filter)
except:
pass
# Solution part 2. (Test)
def test_list_filter():
assert (list_filter(3, [1,2,3,4,5,6]) == [1,2,3])
assert (list_filter(-1, [-2, -1, 0, 1]) == [-2, -1])
    assert (list_filter(1, [2,3,4,5]) == [])
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_list_filter (self):
assert list_filter
assert 'x' in getfullargspec(list_filter).args
assert 'xs' in getfullargspec(list_filter).args
class TestGrades:
def test_docstring_present(self):
assert list_filter.__doc__ is not None
def test_typing_present(self):
assert list_filter.__hints__ == typing.get_type_hints(self.list_filter_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def list_filter_oracle(self, x:int, xs:list)->list:
return [ y for y in xs if y <= x ]
def check_filter (self, x, xs):
assert list_filter (x, xs) == self.list_filter_oracle (x, xs)
def test_correctness(self):
for i in range (100):
l = random.randrange (6)
xs = [ random.randrange (10) for z in range(l) ]
x = random.randrange (10)
self.check_filter (x, xs)
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
10992b85ca307716f9540574e49c699529f6575f
|
59b18dec434fc54cdaf6fd6c224fea9c783f2043
|
/MyBlog/comments/migrations/0001_initial.py
|
a6c7e5dfd249072e98c81326c4d5ee7adbb88c97
|
[] |
no_license
|
InformationX/MyBlog
|
00e95f27a78be39338fbaa462b1fa069cdfad3e6
|
2a5b15535c17b0eee0d1fa9bcebc5f7207dd46db
|
refs/heads/master
| 2021-04-10T00:54:09.002419
| 2019-10-09T11:56:24
| 2019-10-09T11:56:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
# Generated by Django 2.1.4 on 2019-06-13 09:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0004_auto_20190613_1527'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField(verbose_name='正文')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('last_mod_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article', verbose_name='文章')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
('parent_comment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment', verbose_name='上级评论')),
],
options={
'verbose_name': '评论',
'verbose_name_plural': '评论',
'ordering': ['created_time'],
},
),
]
|
[
"javs_shao@163.com"
] |
javs_shao@163.com
|
ea7744864101673132321095636165fa4a0cbc9b
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/dataflow/modeling/model/model_serializer.py
|
8e578fb7093c3752167b0baac4b745d5530625d1
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,831
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from rest_framework import serializers
class CreateModelSerializer(serializers.Serializer):
"""
{
"last_sql": "create table xxx run model xxx",
"project_id": 123,
"model_name": "abc_model",
"model_alias": "阿彼此",
"is_public": True,
"description": "........",
"result_table_ids": ['xx', 'xxx'],
}
"""
last_sql = serializers.CharField(required=True, label="the last sql in the cell")
project_id = serializers.IntegerField(required=True, label="project id")
model_name = serializers.CharField(required=True, label="basic model")
model_alias = serializers.CharField(required=False, label="model alias", default=None)
is_public = serializers.BooleanField(required=False, label="is it public", default=False)
description = serializers.CharField(
required=False,
label="description",
allow_null=True,
allow_blank=True,
default=None,
)
result_table_ids = serializers.ListField(required=True, label="The result tables of the current note")
experiment_name = serializers.CharField(required=False, label="experiment name")
experiment_id = serializers.IntegerField(required=False, label="experiment id", default=None)
evaluation_result = serializers.DictField(required=False, label="model evaluation information", default={})
notebook_id = serializers.IntegerField(required=False, label="model notebook id", default=0)
class InspectionBeforeRelease(serializers.Serializer):
"""
{
"last_sql": "create table xxx run model xxx",
}
"""
sql = serializers.CharField(required=True, label="sql block in the cell")
class UpdateModelSerializer(serializers.Serializer):
"""
{
"last_sql": "create table xxx run model xxx",
"project_id": 123,
"description": "........",
"result_table_ids": ['xx', 'xxx'],
}
"""
last_sql = serializers.CharField(required=True, label="the last sql in the cell")
project_id = serializers.IntegerField(required=True, label="project id")
description = serializers.CharField(required=False, label="description", allow_null=True, allow_blank=True)
result_table_ids = serializers.ListField(required=True, label="The result tables of the current note")
experiment_name = serializers.CharField(required=False, label="experiment name", allow_null=True, allow_blank=True)
experiment_id = serializers.IntegerField(required=False, label="experiment id", default=None, allow_null=True)
evaluation_result = serializers.DictField(required=False, label="model evaluation information", default={})
class GetAlgorithmListSerializer(serializers.Serializer):
"""
{
'framework':'spark_mllib'
}
"""
framework = serializers.CharField(required=False, label="algorithm frame")
class GetReleaseResultSerializer(serializers.Serializer):
"""
{
"task_id": 123
}
"""
task_id = serializers.IntegerField(required=True, label="release task id")
class GetProjectReleaseSerializer(serializers.Serializer):
"""
{
"project_id": 123
}
"""
project_id = serializers.IntegerField(required=True, label="project id")
class GetUpdateReleaseSerializer(serializers.Serializer):
"""
{
"project_id": 123
}
"""
project_id = serializers.IntegerField(required=True, label="project id")
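# (added) Illustrative DRF usage of one of the serializers above:
#   serializer = GetProjectReleaseSerializer(data={"project_id": 123})
#   serializer.is_valid(raise_exception=True)
#   project_id = serializer.validated_data["project_id"]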
|
[
"terrencehan@tencent.com"
] |
terrencehan@tencent.com
|
f2febac8f4268f36933396a59726a5d1d8eaee71
|
31747dd8c61085421d7bd4166f7bd4f9429cf914
|
/tests/test_visitors/test_ast/test_naming/conftest.py
|
c365a79530f1db42a9abe5b5ddc0d39fee60ac86
|
[
"MIT"
] |
permissive
|
edytagarbarz/wemake-python-styleguide
|
0e9ed4080a13a6727b8e80785e113b8407409352
|
74b86156d73c2a4fe9c755138f6953fec41fab3b
|
refs/heads/master
| 2021-03-03T19:21:54.807089
| 2020-03-07T23:35:15
| 2020-03-07T23:35:15
| 245,981,718
| 1
| 1
|
MIT
| 2020-03-09T08:31:55
| 2020-03-09T08:31:54
| null |
UTF-8
|
Python
| false
| false
| 4,646
|
py
|
import pytest
from wemake_python_styleguide.compat.constants import PY38
# Imports:
import_alias = """
import os as {0}
"""
from_import_alias = """
from os import path as {0}
"""
# Function names:
function_name = 'def {0}(): ...'
method_name = """
class Input(object):
def {0}(self): ...
"""
# Function arguments:
function_argument = 'def test(arg, {0}): ...'
method_argument = """
class Input(object):
def validate(self, {0}): ...
"""
function_keyword_argument = 'def test(arg, {0}=None): ...'
method_keyword_argument = """
class Input(object):
def validate(self, {0}=None): ...
"""
function_args_argument = 'def test(arg, *{0}): ...'
function_kwargs_argument = 'def test(arg, **{0}): ...'
method_args_argument = """
class Input(object):
def validate(self, *{0}): ...
"""
method_kwargs_argument = """
class Input(object):
def validate(self, **{0}): ...
"""
function_posonly_argument = """
def test({0}, /): ...
"""
function_kwonly_argument = """
def test(*, {0}): ...
"""
function_kwonly_default_argument = """
def test(*, {0}=True): ...
"""
method_kwonly_argument = """
class Input(object):
def test(self, *, {0}=True): ...
"""
lambda_argument = 'lambda {0}: ...'
# Class attributes:
static_attribute = """
class Test:
{0} = None
"""
static_typed_attribute = """
class Test:
{0}: int = None
"""
static_typed_annotation = """
class Test:
{0}: int
"""
instance_attribute = """
class Test(object):
def __init__(self):
self.{0} = 123
"""
instance_typed_attribute = """
class Test(object):
def __init__(self):
self.{0}: int = 123
"""
# Variables:
variable_def = """
{0} = 'test'
"""
variable_typed_def = """
{0}: str = 'test'
"""
variable_typed = """
{0}: str
"""
# See: https://github.com/wemake-services/wemake-python-styleguide/issues/405
unpacking_variables = """
first.attr, {0} = range(2)
"""
unpacking_star_variables = """
first, *{0} = range(2)
"""
for_variable = """
def container():
for {0} in []:
...
"""
for_star_variable = """
def container():
for index, *{0} in []:
...
"""
with_variable = """
def container():
with open('test.py') as {0}:
...
"""
with_star_variable = """
def container():
with open('test.py') as (first, *{0}):
...
"""
exception = """
try:
1 / 0
except Exception as {0}:
raise
"""
# Fixtures:
_ALL_FIXTURES = frozenset((
# Imports:
import_alias,
from_import_alias,
# Function names, we don't use async function because we generate them:
function_name,
method_name,
# Function arguments:
function_argument,
method_argument,
function_keyword_argument,
method_keyword_argument,
function_args_argument,
function_kwargs_argument,
method_args_argument,
method_kwargs_argument,
function_kwonly_argument,
function_kwonly_default_argument,
method_kwonly_argument,
lambda_argument,
# Class attributes:
static_attribute,
static_typed_attribute,
static_typed_annotation,
instance_attribute,
instance_typed_attribute,
# Variables:
variable_def,
variable_typed_def,
variable_typed,
unpacking_variables,
unpacking_star_variables,
for_variable,
for_star_variable,
with_variable,
with_star_variable,
exception,
))
if PY38:
_ALL_FIXTURES |= {function_posonly_argument}
_SUITABLE_FOR_UNUSED_TUPLE = frozenset((
unpacking_variables,
variable_def,
with_variable,
))
_SUITABLE_FOR_UNUSED = _SUITABLE_FOR_UNUSED_TUPLE | frozenset((
variable_typed_def,
variable_typed,
exception,
))
@pytest.fixture(params=_ALL_FIXTURES)
def naming_template(request):
"""Parametrized fixture that contains all possible naming templates."""
return request.param
@pytest.fixture(params=_SUITABLE_FOR_UNUSED)
def forbidden_unused_template(request):
"""Returns template that can be used to define wrong unused variables."""
return request.param
@pytest.fixture(params=_SUITABLE_FOR_UNUSED_TUPLE)
def forbidden_tuple_unused_template(request):
"""Returns template that can be used to define wrong unused tuples."""
return request.param
@pytest.fixture(params=_SUITABLE_FOR_UNUSED | {
static_attribute,
static_typed_attribute,
static_typed_annotation,
})
def forbidden_raw_unused_template(request):
"""Returns template that can be used to define wrong unused tuples."""
return request.param
@pytest.fixture(params=_ALL_FIXTURES - _SUITABLE_FOR_UNUSED)
def allowed_unused_template(request):
"""Returns template that can define unused variables."""
return request.param
|
[
"mail@sobolevn.me"
] |
mail@sobolevn.me
|
e80bab55c48ceccf23ecc4e67e62307d15f29969
|
929d12e11ed2fb69476b9d07932e38662f0ce6fc
|
/Queues/Reversing Elements Of Queue.py
|
468388212a76ab4d1e361cdeb721eb786c208301
|
[] |
no_license
|
arnabs542/Data-Structures-And-Algorithms
|
b8f341a31ca18044bf179294fbcb0fac1f835216
|
ffcc2f8a25520ce37cd1f67e6225281c85141a65
|
refs/heads/master
| 2022-12-13T14:09:55.005341
| 2020-09-13T11:58:58
| 2020-09-13T11:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
"""
Reversing Elements Of Queue
Problem Description
Given an array of integers A and an integer B. We need to reverse the order of the first B elements of the array, leaving the other elements in the same relative order.
NOTE: You are required to first insert elements into an auxiliary queue then perform Reversal of first B elements.
Problem Constraints
1 <= B <= length of the array <= 200000
1 <= A[i] <= 100000
Input Format
The argument given is the integer array A and an integer B.
Output Format
Return an array of integer after reversing the first B elements of A using queue.
Example Input
Input 1:
A = [1, 2, 3, 4, 5]
B = 3
Input 2:
A = [5, 17, 100, 11]
B = 2
Example Output
Output 1:
[3, 2, 1, 4, 5]
Output 2:
[17, 5, 100, 11]
Example Explanation
Explanation 1:
Reverse first 3 elements so the array becomes [3, 2, 1, 4, 5]
Explanation 2:
Reverse first 2 elements so the array becomes [17, 5, 100, 11]
"""
from collections import deque
class Solution:
# @param A : list of integers
# @param B : integer
# @return a list of integers
def solve(self, A, B):
helper = deque()
for i in range(B):
helper.append(A[i])
index = B - 1
while len(helper) != 0:
temp = helper.popleft()
A[index] = temp
index -= 1
return A
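# (added) Quick check against the examples in the problem statement:
if __name__ == "__main__":
    print(Solution().solve([1, 2, 3, 4, 5], 3))   # -> [3, 2, 1, 4, 5]
    print(Solution().solve([5, 17, 100, 11], 2))  # -> [17, 5, 100, 11]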
|
[
"rishu1998jain@gmail.com"
] |
rishu1998jain@gmail.com
|
c541305cafe96c7c8667b45be1618611178980d9
|
23ec357d5df7addf06cb70c10ba9173521c70a9b
|
/core/migrations/0018_auto_20210621_0048.py
|
9a7276bde29db5d951039b362455c331c431ca68
|
[] |
no_license
|
blimp666/d_job
|
b8e8b93ef6b94e24a38bd94195a779bfff7f3c30
|
18904ac12af6593bf59b1ba379f722bd69d00863
|
refs/heads/main
| 2023-06-07T21:50:34.596128
| 2021-06-22T11:15:20
| 2021-06-23T19:36:48
| 376,893,878
| 0
| 0
| null | 2021-06-15T19:30:46
| 2021-06-14T16:48:17
|
Python
|
UTF-8
|
Python
| false
| false
| 911
|
py
|
# Generated by Django 3.2.4 on 2021-06-21 00:48
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0017_auto_20210619_1206'),
]
operations = [
migrations.AlterField(
model_name='application',
name='file',
field=models.FileField(default='', upload_to='app_files', verbose_name='Работа'),
),
migrations.AlterField(
model_name='conference',
name='date_start',
field=models.DateField(default=datetime.datetime(2021, 6, 21, 0, 48, 43, 294739), verbose_name='Дата проведения'),
),
migrations.AlterField(
model_name='conference',
name='file',
field=models.FileField(default='', upload_to='conf_files', verbose_name='Вложения'),
),
]
|
[
"email@example.com"
] |
email@example.com
|
f301d8c1426299d986f583448ef5069d417a5f45
|
30a2f77f5427a3fe89e8d7980a4b67fe7526de2c
|
/analyze/BHistograms_trigjetht_eta1p7_CSVM_cfg.py
|
4ae59fa9d3b2fae9f267eb69dbd0e0b841e7c040
|
[] |
no_license
|
DryRun/QCDAnalysis
|
7fb145ce05e1a7862ee2185220112a00cb8feb72
|
adf97713956d7a017189901e858e5c2b4b8339b6
|
refs/heads/master
| 2020-04-06T04:23:44.112686
| 2018-01-08T19:47:01
| 2018-01-08T19:47:01
| 55,909,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,788
|
py
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
import sys
options = VarParsing.VarParsing()
options.register('inputFiles',
'/uscms/home/dryu/eosdir/BJetPlusX/QCDBEventTree_BJetPlusX_Run2012B_v1_3/160429_121519/0000/QCDBEventTree_567.root',
VarParsing.VarParsing.multiplicity.list,
VarParsing.VarParsing.varType.string,
"List of input files"
)
options.register('outputFile',
'BHistograms_trigjetht_CSVL.root',
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Output file"
)
options.register('dataSource',
'collision_data',
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'collision_data or simulation'
)
options.register('dataType',
'data',
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'data, signal, or background'
)
options.register('signalMass',
750.,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.float,
'Signal mass hypothesis (only necessary for running over signal)'
)
options.parseArguments()
if options.dataSource != "collision_data" and options.dataSource != "simulation":
print "[BHistograms_BJetPlusX_loose] ERROR : dataSource must be collision_data or simulation"
sys.exit(1)
if not options.dataType in ["data", "signal", "background"]:
print "[BHistograms_BJetPlusX_loose] ERROR : dataType must be data, signal, or background"
sys.exit(1)
process = cms.Process("myprocess")
process.TFileService=cms.Service("TFileService",fileName=cms.string(options.outputFile))
##-------------------- Define the source ----------------------------
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
##-------------------- Cuts ------------------------------------------
# Cuts on the leading two jets
dijet_cuts = cms.VPSet(
cms.PSet(
name = cms.string("MinPt"),
parameters = cms.vdouble(30.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MaxAbsEta"),
parameters = cms.vdouble(1.7),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("IsTightID"),
parameters = cms.vdouble(),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MaxMuonEnergyFraction"),
parameters = cms.vdouble(0.8),
descriptors = cms.vstring()
),
)
# Cuts on all PF jets (defines the generic jet collection for e.g. making fat jets)
pfjet_cuts = cms.VPSet(
cms.PSet(
name = cms.string("MinPt"),
parameters = cms.vdouble(30.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MaxAbsEta"),
parameters = cms.vdouble(5),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("IsLooseID"),
parameters = cms.vdouble(),
descriptors = cms.vstring()
),
)
# Cuts on calo jets
calojet_cuts = cms.VPSet(
cms.PSet(
name = cms.string("MinPt"),
parameters = cms.vdouble(30.),
descriptors = cms.vstring()
)
)
# Event cuts
event_cuts = cms.VPSet(
cms.PSet(
name = cms.string("TriggerOR"),
parameters = cms.vdouble(),
descriptors = cms.vstring("HLT_PFHT650_v5", "HLT_PFHT650_v6", "HLT_PFHT650_v7", "HLT_PFHT650_v8", "HLT_PFHT650_v9", "HLT_PFNoPUHT650_v1", "HLT_PFNoPUHT650_v3", "HLT_PFNoPUHT650_v4", "HLT_HT750_v1", "HLT_HT750_v2", "HLT_HT750_v3", "HLT_HT750_v4", "HLT_HT750_v5", "HLT_HT750_v7")
),
cms.PSet(
name = cms.string("MaxMetOverSumEt"),
parameters = cms.vdouble(0.5),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("GoodPFDijet"),
parameters = cms.vdouble(),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MinNCSVM"),
parameters = cms.vdouble(2),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MinLeadingPFJetPt"),
parameters = cms.vdouble(80.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MinSubleadingPFJetPt"),
parameters = cms.vdouble(70.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("PFDijetMaxDeltaEta"),
parameters = cms.vdouble(1.3),
descriptors = cms.vstring()
)
)
##-------------------- User analyzer --------------------------------
process.BHistograms = cms.EDAnalyzer('BHistograms',
file_names = cms.vstring(options.inputFiles),
tree_name = cms.string('ak5/ProcessedTree'),
trigger_histogram_name = cms.string('ak5/TriggerNames'),
#triggers = cms.vstring('HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v2:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v3:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v4:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v5:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v7:L1_DoubleJetC36'),
#triggers = cms.vstring( 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v2:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v3:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v4:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v5:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v7:L1_SingleJet128'),
data_source = cms.string(options.dataSource),
data_type = cms.string(options.dataType),
signal_mass = cms.double(options.signalMass),
max_events = cms.int32(-1),
dijet_cuts = dijet_cuts,
pfjet_cuts = pfjet_cuts,
calojet_cuts = calojet_cuts,
event_cuts = event_cuts,
fatjet_delta_eta_cut = cms.double(1.1),
btag_wp_1 = cms.string('CSVM'),
btag_wp_2 = cms.string('CSVM'),
)
process.p = cms.Path(process.BHistograms)
|
[
"david.renhwa.yu@gmail.com"
] |
david.renhwa.yu@gmail.com
|
b3573574645dab4a14085dbbfc1ceed3c185f247
|
231f8a898b20e475a5cabff439600de211d825c0
|
/superlists/urls.py
|
1b817a2f4121136d79814f362f95857a779149c2
|
[
"MIT"
] |
permissive
|
thewchan/superlists
|
f7370b341ce7c37b8cae506eb5bafdd2fb31b07a
|
af41636b2cdafb45c638e36076b9cdefc5586aad
|
refs/heads/master
| 2023-05-26T11:01:24.310480
| 2021-06-11T21:12:20
| 2021-06-11T21:12:20
| 361,209,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
"""superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from accounts import urls as accounts_urls
from lists import views as list_views, urls as list_urls
urlpatterns = [
url(r"^$", list_views.home_page, name="home"),
url(r"^lists/", include(list_urls)),
url(r"^accounts/", include(accounts_urls)),
]
|
[
"thewchan@gmail.com"
] |
thewchan@gmail.com
|
3b6cce24b6bf09f2d048fa65cbaea2b432e16d92
|
4dda597dac544b237cf8f8b04b3c9e662b988a92
|
/11-1/mydata/applitions/views.py
|
8f8ffa4a104ff5767a5aa416a9ebb19e76647a2d
|
[] |
no_license
|
beriuta/history
|
02ac9414c0475fde59f6a455c23c56235fe3c4bc
|
026c965f694e84120825069bedf7bfac235318b5
|
refs/heads/master
| 2020-04-11T17:24:39.558174
| 2018-12-16T02:08:18
| 2018-12-16T02:08:18
| 161,959,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
from django.shortcuts import render
from datetime import datetime,timedelta
# Create your views here.
def t(request):
name = '测试'
d1 = {'name': '小鬼', 'age': 18, 'hobby': 'eat', 'items': 'hello'}
class People:
def __init__(self, name, age):
self.name = name
self.age = age
@staticmethod
def dream():
return 'Write the code,Change the world'
p5 = People('小懒虫', 1)
p1 = People('小明', 23)
p2 = People('小红', 13)
p3 = People('小白', 15)
list1 = [p1, p2, p3]
list2 = ['深圳', '上海', '北京', '广州', '东莞', '威海', '青岛', '潍坊']
return render(request, 't.html', {'name': name, 'd1': d1, 'f5': p5, 'list': list1, 'list2': list2})
def s(request):
l = [11, 32, 73]
name = '小懒虫'
class Food:
def __init__(self, name, kg):
self.name = name
self.kg = kg
        # @staticmethod  # when a function never uses self, this decorator can be added above it
        def dream(self):
            return '{}的梦想:世界唯有美食不可辜负!'.format(self.name)  # can't use print here, or it prints to the backend console and nothing shows on the page
duck = Food('烤鸭', 2)
pig = Food('烤猪', 50)
sheep = Food('烤全羊', 30)
chicken = Food('炸鸡', 23)
lst = [pig, sheep, chicken]
return render(request, 's.html',
{
'l': l,
'name': name,
'food': duck,
'lst': lst
}
)
# filter-related template syntax
def m(request):
name = 'Beriuta'
file_size = 10000
a = '<a href="https://www.baidu.com">百度</a>'
p = '在苍茫的大海上,狂风卷积着乌云,在乌云和大海之间,海燕像黑色的闪电,在高傲地飞翔!'
p_1 = '在 苍 茫 的 大 海 上,狂风卷积着乌云,在乌云和大海之间,海燕像黑色的闪电,在高傲地飞翔!'
p_2 = 'aaabsbshsasjahahaayaha'
    now = datetime.now()  # get a datetime-typed timestamp
list1 = ['huhu','hehe','didi','shil','sb']
    # get the time four hours before now
    hours = now - timedelta(hours=4)
return render(request, 'm.html',
{'name': name,
'file_size':file_size,
'now':now,
'a':a,
'p':p,
'p_1':p_1,
'p_2':p_2,
'list1':list1,
'hours':hours
}
)
|
[
"2457409765@qq.com"
] |
2457409765@qq.com
|
676e52de08ecbf08fbb59988ca58614c255892d8
|
5b4312ddc24f29538dce0444b7be81e17191c005
|
/autoware.ai/1.12.0/devel/.private/vector_map_msgs/lib/python2.7/dist-packages/vector_map_msgs/msg/_RailCrossingArray.py
|
c5485493b9388cf7c0c77f72f2f28ac09a8f6105
|
[
"MIT"
] |
permissive
|
muyangren907/autoware
|
b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2
|
5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38
|
refs/heads/master
| 2020-09-22T13:08:14.237380
| 2019-12-03T07:12:49
| 2019-12-03T07:12:49
| 225,167,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,277
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from vector_map_msgs/RailCrossingArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import vector_map_msgs.msg
import std_msgs.msg
class RailCrossingArray(genpy.Message):
_md5sum = "62d7f260c71b469b058ab28f3bce2ded"
_type = "vector_map_msgs/RailCrossingArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
RailCrossing[] data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: vector_map_msgs/RailCrossing
# Ver 1.00
int32 id
int32 aid
int32 linkid
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','vector_map_msgs/RailCrossing[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(RailCrossingArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = []
else:
self.header = std_msgs.msg.Header()
self.data = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_3i().pack(_x.id, _x.aid, _x.linkid))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.RailCrossing()
_x = val1
start = end
end += 12
(_x.id, _x.aid, _x.linkid,) = _get_struct_3i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_3i().pack(_x.id, _x.aid, _x.linkid))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.RailCrossing()
_x = val1
start = end
end += 12
(_x.id, _x.aid, _x.linkid,) = _get_struct_3i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_3i = None
def _get_struct_3i():
global _struct_3i
if _struct_3i is None:
_struct_3i = struct.Struct("<3i")
return _struct_3i
|
[
"907097904@qq.com"
] |
907097904@qq.com
|
00465e54f148270106631a6302c36d295425dfe7
|
4e678f6967745c3da2a3e8899204e34d99fc3be0
|
/python-quest-1.py
|
6e6922acc790a0eff4ffcedd901c28ea8e2f4366
|
[] |
no_license
|
pratikshah1701/hackerrank
|
bf689a583b462c0fde697abd536ae0638d14ced9
|
73fde2a0d9dc063e1c06d42f0572ce01f5dd04b9
|
refs/heads/master
| 2021-01-19T18:47:15.278811
| 2017-04-13T09:16:27
| 2017-04-13T09:16:27
| 88,381,516
| 1
| 1
| null | 2017-04-16T00:20:00
| 2017-04-16T00:20:00
| null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#!/usr/bin/env python3
def main():
    for i in range(1, int(input())):  # More than 2 lines will result in 0 score. Do not leave a blank line also
        print((10 ** i - 1) // 9 * i)  # (10**i - 1)//9 is the repunit 11...1 (i ones); multiplying by i repeats the digit i
if __name__ == '__main__':
main()
|
[
"charles.wangkai@gmail.com"
] |
charles.wangkai@gmail.com
|
6b6f314619290d1abeedd0e25f056113994f73bd
|
4861398f925ae2cc01189577c71c4983fd7d097b
|
/kakao_().py
|
88bd003daf8d8bcebc4b1e7d1320be8a73a7a17d
|
[] |
no_license
|
fightnyy/programmers_algorithm
|
1c682220aedc078f7a184109f06aa95f673d1d43
|
5efe26a12661f3d278bfcca66753ccfd10451002
|
refs/heads/master
| 2023-05-02T15:53:22.116662
| 2021-04-27T00:59:30
| 2021-04-27T00:59:30
| 328,570,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
def solution(p):
    # Recursive "balanced -> correct" parentheses transform; this completes the
    # helper skeleton sketched in the original (_divide / _is_correct / recursion).
    def _divide(s):
        # split s into u, the smallest non-empty balanced prefix, and the rest v
        count = 0
        for i, c in enumerate(s):
            count += 1 if c == '(' else -1
            if count == 0:
                return s[:i + 1], s[i + 1:]

    def _is_correct(s):
        stack = []
        for c in s:
            if c == '(':
                stack.append(c)
            else:
                if stack == []:
                    return False
                stack.pop()
        return stack == []

    def _convert(s):
        if s == "":
            return ""
        u, v = _divide(s)
        if _is_correct(u):
            return u + _convert(v)
        # wrap the converted remainder and append the flipped inside of u
        flipped = ''.join('(' if c == ')' else ')' for c in u[1:-1])
        return '(' + _convert(v) + ')' + flipped

    return _convert(p)
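# (added) Quick check against the published examples for this task:
if __name__ == "__main__":
    print(solution("(()())()"))  # -> "(()())()"
    print(solution(")("))        # -> "()"
    print(solution("()))((()"))  # -> "()(())()"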
|
[
"fightnyy@naver.com"
] |
fightnyy@naver.com
|
6ff0110e98ad5e642d3aed34dc2cf15987a7b382
|
ad553dd718a8df51dabc9ba636040da740db57cf
|
/.history/app_20181209011747.py
|
f315e62e16bac6e29a0065fac970aaec00ade91b
|
[] |
no_license
|
NergisAktug/E-Commerce-PythonWithFlask-Sqlite3
|
8e67f12c28b11a7a30d13788f8dc991f80ac7696
|
69ff4433aa7ae52ef854d5e25472dbd67fd59106
|
refs/heads/main
| 2023-01-01T14:03:40.897592
| 2020-10-19T20:36:19
| 2020-10-19T20:36:19
| 300,379,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,172
|
py
|
import datetime
import sqlite3 as sql
from flask import Flask,flash, request, render_template_string, render_template
from flask import Flask, url_for, render_template, request, redirect, session, escape, render_template_string
from flask_babelex import Babel
from flask_sqlalchemy import SQLAlchemy
from flask_user import current_user, login_required, roles_required
from sqlalchemy.sql import table, column, select
from sqlalchemy import MetaData, create_engine
from flask_user import login_required, roles_required, UserManager, UserMixin
class ConfigClass(object):
SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
SQLALCHEMY_DATABASE_URI = 'sqlite:///eticaret.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_USERNAME = 'nergis.aktug2014@gmail.com'
MAIL_PASSWORD = '05383896877'
MAIL_DEFAULT_SENDER = '"MyApp" <xyz@gmail.com>'
USER_ENABLE_EMAIL = True
USER_ENABLE_USERNAME = False
USER_EMAIL_SENDER_EMAIL = "noreply@example.com"
def create_app():
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
db = SQLAlchemy(app)
class Kullanici(db.Model):
__tablename__ = 'Kullanici'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(80), unique=True)
sifre = db.Column(db.String(80))
rolId = db.Column(db.Integer, db.ForeignKey('rol.rolId', ondelete='CASCADE'))
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')
def __init__(self, email, sifre):
self.email = email
self.sifre = sifre
self.rolId = 0
class Roller(db.Model):
__tablename__ = 'rol'
rolId = db.Column(db.Integer, primary_key=True)
rolisim = db.Column(db.String(80))
class urunler(db.Model):
__tablename__ = 'urunler'
urun_id = db.Column(db.Integer, primary_key=True)
urunismi = db.Column(db.String(80))
urunresmi = db.Column(db.String(80))
urunFiyati = db.Column(db.Integer)
markaId = db.Column(db.Integer(), db.ForeignKey('markalar.markaId', ondelete='CASCADE'))
def __init__(self, urunismi, urunresmi, urunFiyati,markaId):
self.urunismi =urunismi
self.urunresmi = urunresmi
self.urunFiyati = urunFiyati
self.markaId=markaId
class markalar(db.Model):
__tablename__ = 'markalar'
markaId = db.Column(db.Integer, primary_key=True)
markaadi = db.Column(db.String(80))
marka_modeli = db.Column(db.String(80))
def __init__(self, markaadi, marka_modeli):
self.markaadi = markaadi
self.marka_modeli = marka_modeli
class musteri(db.Model):
__tablename__ = 'musteri'
musteriId = db.Column(db.Integer, primary_key=True)
musteriadi = db.Column(db.String(80))
musterisoyadi = db.Column(db.String(80))
mail = db.Column(db.String(80), unique=True)
telefon = db.Column(db.Integer)
sifre = db.Column(db.String(80))
il = db.Column(db.String(80))
ilce = db.Column(db.String(80))
kullaniciId = db.Column(db.Integer(), db.ForeignKey('Kullanici.id', ondelete='CASCADE'))
def __init__(self, musteriadi, musterisoyadi, mail, telefon, sifre, il, ilce, kullaniciId):
self.musteriadi = musteriadi
self.musterisoyadi = musterisoyadi
self.mail = mail
self.telefon = telefon
self.sifre = sifre
self.il = il
self.ilce = ilce
self.kullaniciId = kullaniciId
class siparis(db.Model):
__tablename__ = 'siparis'
siparisId = db.Column(db.Integer, primary_key=True)
musteriId = db.Column(db.Integer(), db.ForeignKey('musteri.musteriId', ondelete='CASCADE'))
urunId = db.Column(db.Integer(), db.ForeignKey('urunler.urun_id', ondelete='CASCADE'))
siparisno = db.Column(db.Integer)
siparisTarihi = db.Column(db.Integer)
odemeId = db.Column(db.Integer())
def __init__(self, musteriId, urunId, siparisno, siparisTarihi, odemeId):
self.musteriId = musteriId
self.urunId = urunId
self.siparisno = siparisno
self.siparisTarihi = siparisTarihi
self.odemeId = odemeId
db.create_all()
@app.route('/')
def anasayfa():
return render_template('index.html')
@app.route('/kayit', methods=['GET', 'POST'])
def kayit():
if request.method == 'POST':
mail = request.form['email']
parola = request.form['sifre']
yeniKullanici = Kullanici(email=mail, sifre=parola)
db.session.add(yeniKullanici)
db.session.commit()
if yeniKullanici is not None:
mesaj = "Kayıt Başarıyla Sağlanmıştır."
return render_template("index.html", mesaj=mesaj)
else:
return render_template('kayit.html')
@app.route('/admin')
def admin():
return render_template("admin.html")
@app.route('/uye', methods=['GET', 'POST'])
def uye():
return render_template("uyeGirisi.html")
@app.route('/giris', methods=['GET', 'POST'])
def giris():
hata=None
if request.method=='POST':
if request.form['email']!='admin@example.com' or request.form['sifre']!='admin':
                if Kullanici.query.filter_by(email=request.form['email'],sifre=request.form['sifre']).first() is not None:
session['uye_giris']=True
return redirect(url_for('anasayfa'))
else:
hata='hatalı giris yaptınız'
else:
flash('giriş başarılı')
session['admin_giris']=True
return redirect(url_for('admin'))
return render_template('uyeGiris.html',hata=hata)
@app.route('/cikis')
def cikis():
session.pop('admin_giris',None)
session.pop('uye_giris',None)
return render_template("index.html")
@app.route('/urunEkle')
def urunEkle():
tumVeri=urunler.query.all()
return render_template("urunEkle.html",tumVeri=tumVeri)
@app.route('/urunEklemeYap',methods=['POST'])
def urunEklemeYap():
urunismi=request.form['urunismi']
urunResmi=request.form['urunresmi']
urunFiyati=request.form['fiyati']
markaId=request.form['markaId']
yeniUrun=urunler(urunismi=urunismi,urunresmi=urunResmi,urunFiyati=urunFiyati,markaId=markaId)
db.session.add(yeniUrun)
db.session.commit()
return redirect(url_for("urunEkle"))
@app.route("/sil/<string:id>")
def sil(id):
urun=urunler.query.filter_by(urun_id=id).first()
db.session.delete(urun)
db.session.commit()
return redirect(url_for("urunEkle"))
@app.route('/guncelle/<string:id>',methods=['POST','GET'])
    def guncelle(id):
        msg = ""
        if request.method == 'POST':
            try:
                urunismi = request.form['urunismi']
                urunresmi = request.form['urunresmi']
                fiyati = request.form['fiyati']
                markaId = request.form['markaId']
                with sql.connect("eticaret.sqlite") as con:
                    cur = con.cursor()
                    if urunler.query.filter_by(urun_id=id).first() is not None:
                        # the column is named urunFiyati in the model, not fiyati
                        cur.execute("UPDATE urunler SET urunismi=?,urunresmi=?,urunFiyati=?,markaId=? WHERE urun_id=?", (urunismi, urunresmi, fiyati, markaId, id))
                        con.commit()
                        msg = "guncelleme basarili"
            except:
                con.rollback()
                msg = "hata olustu"
            finally:
                con.close()
        return render_template("urunEkle.html", msg=msg)
@app.route('/Markalar')
def Markalar():
tumMarka=markalar.query.all()
return render_template("marka.html",tumMarka=tumMarka)
return app
if __name__ == '__main__':
app=create_app()
app.run(host='127.0.0.1', port=5000, debug=True)
|
[
"nergis.aktug2014@gmail.com"
] |
nergis.aktug2014@gmail.com
|
47ec1eb674eb47c0d01660f188daff40943df5e2
|
b5fb45288ed2a204692051ab78e72d8aa6e5accd
|
/argo_data_scripts/vis/vis_contrast.py
|
51dc949c3ad92268e7c9143fa92182c57a7c0d8f
|
[
"Apache-2.0"
] |
permissive
|
nithinksath96/MMdetection_TensorRT_FP16
|
d4987f003798f5d6d4fe5bde2f30dd5ee2e8596d
|
c8379b209d4deeff9350baf5bbedfc95fb8941f4
|
refs/heads/master
| 2023-02-13T20:00:21.834541
| 2021-01-06T09:24:20
| 2021-01-06T09:24:20
| 327,260,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,937
|
py
|
# Merge and convert real-time results
# Optionally, visualize the output
# This script does not need to run in real-time
import argparse
from os import scandir
from os.path import join, isfile
from tqdm import tqdm
import numpy as np
from PIL import Image, ImageFont, ImageDraw
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2
from vis.make_videos_numbered import worker_func as make_video
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dir-A', type=str, default=None)
parser.add_argument('--dir-B', type=str, default=None)
parser.add_argument('--horizontal', action='store_true', default=False)
parser.add_argument('--split-pos', type=float, default=0.5)
parser.add_argument('--split-animation', type=str, default=None)
parser.add_argument('--fps', type=float, default=30)
parser.add_argument('--out-dir', type=str, required=True)
parser.add_argument('--vis-scale', type=float, default=1)
parser.add_argument('--seq', type=str, default=None)
parser.add_argument('--make-video', action='store_true', default=False)
parser.add_argument('--overwrite', action='store_true', default=False)
opts = parser.parse_args()
return opts
# Smoothing functions
# map time from 0-1 to progress from 0-1
def ease_in_out(t):
return -np.cos(np.pi*t)/2 + 0.5
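# Quick check of the easing endpoints (illustrative only):
#   ease_in_out(0.0) -> 0.0, ease_in_out(0.5) -> 0.5, ease_in_out(1.0) -> 1.0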
# animations
def split_anime_swing(t, split_pos, l, line_width):
# timing information in seconds
durations = [4, 1, 3, 2, 3, 1]
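    # Timeline implied by the keyframes below (illustrative): hold at split_pos
    # 4s, sweep to the far edge 1s, hold 3s, sweep across to the near edge 2s,
    # hold 3s, swing back 1s; after these 14 seconds the split stays put.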
small_end = -line_width//2 - 1
big_end = l + line_width//2
k = 0
last_key = 0
if t < durations[k]:
return split_pos
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
start_pos = split_pos
end_pos = big_end
p = ease_in_out((t - last_key)/durations[k])
return start_pos + p*(end_pos - start_pos)
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
return big_end
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
start_pos = big_end
end_pos = small_end
p = ease_in_out((t - last_key)/durations[k])
return start_pos + p*(end_pos - start_pos)
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
return small_end
last_key += durations[k]
k += 1
if t < last_key + durations[k]:
start_pos = small_end
end_pos = split_pos
p = ease_in_out((t - last_key)/durations[k])
return start_pos + p*(end_pos - start_pos)
return split_pos
def main():
opts = parse_args()
seqs = sorted([item.name for item in scandir(opts.dir_A) if item.is_dir()])
if opts.seq is not None:
if opts.seq.isdigit():
idx = int(opts.seq)
else:
idx = seqs.index(opts.seq)
seqs = [seqs[idx]]
line_width = 15
line_color = [241, 159, 93]
line_color = np.array(line_color, dtype=np.uint8).reshape((1, 1, 3))
# font_path = r'C:\Windows\Fonts\Rock.otf'
# font_path = r'C:\Windows\Fonts\AdobeGurmukhi-Regular.otf'
# font = ImageFont.truetype(font_path, size=40)
for s, seq in enumerate(seqs):
print(f'Processing {s + 1}/{len(seqs)}: {seq}')
seq_dir_A = join(opts.dir_A, seq)
seq_dir_B = join(opts.dir_B, seq)
seq_dir_out = mkdir2(join(opts.out_dir, seq))
frame_list = [item.name for item in scandir(seq_dir_A) if item.is_file() and item.name.endswith('.jpg')]
frame_list = sorted(frame_list)
# frame_list = frame_list[:330]
for ii, frame in enumerate(tqdm(frame_list)):
out_path = join(seq_dir_out, frame)
if not opts.overwrite and isfile(out_path):
continue
img_A = Image.open(join(seq_dir_A, frame))
img_B = Image.open(join(seq_dir_B, frame))
w, h = img_A.size
l = h if opts.horizontal else w
split_pos = opts.split_pos if opts.split_pos > 1 else l*opts.split_pos
if opts.split_animation:
split_pos = globals()['split_anime_' + opts.split_animation](
ii/opts.fps, split_pos, l, line_width,
)
split_pos = int(round(split_pos))
line_start = split_pos - (line_width - 1)//2
line_end = split_pos + line_width//2 # inclusive
# using TrueType supported in PIL
# draw = ImageDraw.Draw(img)
# draw.text(
# (lt[0], lt[1] - font.size),
# text, (*color, 1), # RGBA
# font=font,
# )
if split_pos <= 0:
img = np.array(img_B)
else:
img = np.array(img_A)
img_B = np.asarray(img_B)
if opts.horizontal:
img[split_pos:] = img_B[split_pos:]
else:
img[:, split_pos:] = img_B[:, split_pos:]
if line_start < l and line_end >= 0:
# line is visible
line_start = max(0, line_start)
line_end = min(l, line_end)
if opts.horizontal:
img[line_start:line_end, :] = line_color
else:
img[:, line_start:line_end] = line_color
Image.fromarray(img).save(out_path)
if opts.make_video:
out_path = seq_dir_out + '.mp4'
if opts.overwrite or not isfile(out_path):
print('Making the video')
make_video((seq_dir_out, opts))
else:
print(f'python vis/make_videos_numbered.py "{opts.out_dir}" --fps {opts.fps}')
if __name__ == '__main__':
main()
|
[
"nsathish@compute-0-32.local"
] |
nsathish@compute-0-32.local
|
65f48d5c7bdf634c88c47b5df4e34a43fb7061f4
|
dfaa71f8064d3d0773941cf14ab86ff57ff67284
|
/part35/blog/models.py
|
e096fa50faab3cc7fd3ab30abcc1c60a66a4db46
|
[
"Apache-2.0"
] |
permissive
|
yllew36/WellyGI
|
e94c5000ff3a7f2fd7316d22ad166fbf7916ea23
|
7d53fac4c81bb994f61b22761e5ac7e48994ade4
|
refs/heads/master
| 2020-09-05T15:49:37.386078
| 2019-11-15T08:16:59
| 2019-11-15T08:16:59
| 220,148,061
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
from django.db import models
# Create your models here.
class PostModel(models.Model):
judul = models.CharField(max_length=20)
body = models.TextField()
category = models.CharField(max_length=20)
published = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return "{}. {}" .format(self.id,self.judul)
|
[
"yllew36@gmail.com"
] |
yllew36@gmail.com
|
059945b503bc8e858ff2111eb171506610714b02
|
8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6
|
/venv/Lib/site-packages/mypy/typeshed/third_party/2and3/markdown/extensions/legacy_attrs.pyi
|
035b589a9f34a9529d88c4a202238e44166669ca
|
[] |
no_license
|
RodrigoNeto/cursopythonyt
|
fc064a2e6106324e22a23c54bdb9c31040ac9eb6
|
279dad531e21a9c7121b73d84fcbdd714f435e7e
|
refs/heads/master
| 2023-07-03T00:54:09.795054
| 2021-08-13T12:42:24
| 2021-08-13T12:42:24
| 395,646,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
pyi
|
from typing import Any, Pattern
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
ATTR_RE: Pattern
class LegacyAttrs(Treeprocessor):
def handleAttributes(self, el, txt): ...
class LegacyAttrExtension(Extension): ...
def makeExtension(**kwargs): ...
|
[
"rodrigoneto.forseti@gmail.com"
] |
rodrigoneto.forseti@gmail.com
|
5fc3082edcd76e319526f196ca4f0ac1f08de570
|
d10d6d037ad741e2383eb1bb8bbc5dd3b3f09e41
|
/python3/practice/classic_puzzle/easy/pirates-treasure.py
|
b6637587260c3ffcd0f4ba3521b9a43ac09effcb
|
[] |
no_license
|
jz4o/codingames
|
4614d34a3d9be07205747ee3617479e1e77fd6ed
|
1f2222d23aaf670c7ddb658e7aca1a8e12e9bcab
|
refs/heads/master
| 2023-08-18T03:30:04.640290
| 2023-08-17T14:24:04
| 2023-08-17T14:24:04
| 102,266,024
| 6
| 2
| null | 2021-05-01T13:33:34
| 2017-09-03T13:20:10
|
Java
|
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
# import sys
# import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
w = int(input())
h = int(input())
grid = []
grid.append([1] * (w + 2))
for i in range(h):
row = []
row.append(1)
for j in input().split():
v = int(j)
row.append(v)
row.append(1)
grid.append(row)
grid.append([1] * (w + 2))
# Write an answer using print
# To debug: print("Debug messages...", file=sys.stderr, flush=True)
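# The treasure is the unique open cell (0) whose eight neighbours are all
# blocked (1); the sentinel border of 1s added around the grid above removes
# the edge cases at the boundary.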
def find_pirates_treasure_indexes(grid):
for row_index, row in enumerate(grid[1:-1], start=1):
for column_index, column in enumerate(row[1:-1], start=1):
if 0 in grid[row_index - 1][column_index - 1:column_index + 2]:
continue
if row[column_index - 1] == 0:
continue
if column != 0:
continue
if row[column_index + 1] == 0:
continue
if 0 in grid[row_index + 1][column_index - 1:column_index + 2]:
continue
return f'{column_index - 1} {row_index - 1}'
result = find_pirates_treasure_indexes(grid)
# print("x y")
print(result)
|
[
"26317360+jz4o@users.noreply.github.com"
] |
26317360+jz4o@users.noreply.github.com
|
4c04be0c1b62494ac0091aebed2588dc3cae107f
|
bcc916eb6c5ce649758c903fba6065f6d53dc6d2
|
/datalad_webapp/__init__.py
|
9ebfad5fa15a49e81b35bb43c19031ae832458dd
|
[
"MIT"
] |
permissive
|
bpoldrack/datalad-webapp
|
ed4d432d3a6fc49eff40bfcc296e5426ae3ab3b6
|
c6107ba0460b7a967cabe5cecbf2b8e18b3fbf49
|
refs/heads/master
| 2021-05-25T11:44:53.770310
| 2018-03-29T13:38:53
| 2018-03-29T13:38:53
| 127,297,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,220
|
py
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""DataLad webapp support"""
__docformat__ = 'restructuredtext'
import logging
from os.path import dirname
from os.path import basename
from os.path import isdir
from os.path import join as opj
from glob import glob
from datalad import cfg
from pkg_resources import iter_entry_points
from datalad.dochelpers import exc_str
from datalad.utils import assure_list
from datalad.interface.base import Interface
from datalad.interface.base import build_doc
from datalad.support.param import Parameter
from datalad.distribution.dataset import datasetmethod
from datalad.interface.utils import eval_results
from datalad.support.constraints import EnsureNone
from datalad.distribution.dataset import EnsureDataset
# defines a datalad command suite
# this symbol must be identified as a setuptools entry point
# to be found by datalad
module_suite = (
# description of the command suite, displayed in cmdline help
"Generic web app support",
[('datalad_webapp', 'WebApp', 'webapp', 'webapp')]
)
# we want to hook into datalad's logging infrastructure, so we use a common
# prefix
lgr = logging.getLogger('datalad.module.webapp')
@build_doc
class WebApp(Interface):
"""
"""
_params_ = dict(
app=Parameter(
args=('--app',),
doc="yeah!",
nargs='+',
action='append'),
dataset=Parameter(
args=("-d", "--dataset"),
doc="""specify the dataset to serve as the anchor of the webapp.
An attempt is made to identify the dataset based on the current
working directory. If a dataset is given, the command will be
executed in the root directory of this dataset.""",
constraints=EnsureDataset() | EnsureNone()),
daemonize=Parameter(
args=("--daemonize",),
action='store_true',
doc="yeah!"),
)
@staticmethod
@datasetmethod(name='webapp')
@eval_results
def __call__(app, dataset=None, daemonize=False):
apps = assure_list(app)
if not apps:
raise ValueError('no app specification given')
if not isinstance(apps[0], (list, tuple)):
apps = [apps]
apps = {a[0] if isinstance(a, (list, tuple)) else a:
a[1] if isinstance(a, (list, tuple)) and len(a) > 1 else None
for a in apps}
import cherrypy
# global config
cherrypy.config.update({
# prevent visible tracebacks, etc:
# http://docs.cherrypy.org/en/latest/config.html#id14
#'environment': 'production',
#'log.error_file': 'site.log',
})
# set the priority according to your needs if you are hooking something
# else on the 'before_finalize' hook point.
@cherrypy.tools.register('before_finalize', priority=60)
def secureheaders():
headers = cherrypy.response.headers
headers['X-Frame-Options'] = 'DENY'
headers['X-XSS-Protection'] = '1; mode=block'
headers['Content-Security-Policy'] = "default-src='self'"
# only add Strict-Transport headers if we're actually using SSL; see the ietf spec
# "An HSTS Host MUST NOT include the STS header field in HTTP responses
# conveyed over non-secure transport"
# http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec-14#section-7.2
            if (cherrypy.server.ssl_certificate is not None and
                    cherrypy.server.ssl_private_key is not None):
headers['Strict-Transport-Security'] = 'max-age=31536000' # one year
if daemonize:
from cherrypy.process.plugins import Daemonizer
Daemonizer(cherrypy.engine).subscribe()
#PIDFile(cherrypy.engine, '/var/run/myapp.pid').subscribe()
        # when running on a privileged port
#DropPrivileges(cherrypy.engine, uid=1000, gid=1000).subscribe()
enabled_apps = []
for ep in iter_entry_points('datalad.webapps'):
if ep.name not in apps:
continue
mount = apps[ep.name] if apps[ep.name] else '/'
# get the webapp class
cls = ep.load()
# fire up the webapp instance
inst = cls(**dict(dataset=dataset))
# mount under global URL tree (default or given suburl)
app = cherrypy.tree.mount(
root=inst,
script_name=mount,
# app config file, it is ok for that file to not exist
config=cls._webapp_config
)
# forcefully impose more secure mode
# TODO might need one (or more) switch(es) to turn things off for
# particular scenarios
enabled_apps.append(ep.name)
app.merge({
'/': {
# turns all security headers on
'tools.secureheaders.on': True,
'tools.sessions.secure': True,
'tools.sessions.httponly': True}})
static_dir = opj(cls._webapp_dir, cls._webapp_staticdir)
if isdir(static_dir):
app.merge({
# the key has to be / even when an app is mount somewhere
# below
'/': {
'tools.staticdir.on': True,
'tools.staticdir.root': cls._webapp_dir,
'tools.staticdir.dir': cls._webapp_staticdir}}
)
failed_apps = set(apps).difference(enabled_apps)
if failed_apps:
lgr.warning('Failed to load webapps: %s', failed_apps)
if not enabled_apps:
return
cherrypy.engine.start()
cherrypy.engine.block()
yield {}
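# Illustrative invocation (the app name, and the exact command-line syntax,
# are assumptions; actual apps are discovered via the 'datalad.webapps'
# entry point group registered by extensions):
#   datalad webapp --app myapp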
|
[
"michael.hanke@gmail.com"
] |
michael.hanke@gmail.com
|
670cd19658bb9118b6186e4645afd044289372ac
|
82042141439ae004fc38bb2ef6238f36ec6bb050
|
/attendance/settings.py
|
03f8fb508ead9f5f52ebcd48f3f82c239a48a82e
|
[] |
no_license
|
psteichen/clusil-intranet
|
2e9a2cf3b00692a4ef441ebf669af4e63945e9a2
|
5c028d33f6a8559af57a4eeb02fc0f612cb1b261
|
refs/heads/master
| 2021-07-13T15:40:06.464105
| 2020-06-30T19:51:00
| 2020-06-30T19:51:00
| 27,195,950
| 2
| 1
| null | 2021-06-10T20:06:47
| 2014-11-26T20:59:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
# Application settings for attendance app
# coding=utf-8
ACTIONS = {
'invite': (
{
      'label' : u'Invite (a) colleague(s).',
'grade' : 'info',
'icon' : 'plus',
'url' : '/meetings/invite/',
},
),
}
ATTENDANCE_TMPL_CONTENT = {
'template' : 'done.html',
'too_late' : u'Sorry, it is <strong>too late</strong> to confirm/cancel your participation!',
# 'actions' : ACTIONS['invite'],
'actions' : None,
'yes' : u'%(name)s, herewith your <strong>participation</strong> is <strong>confirmed</strong>!',
  'no'    : u'%(name)s, thank you for notifying us of your cancellation, you will be <strong>excused</strong>!',
'details' : u'''<p class="lead">
<ul class="lead">
Title: <em>%(title)s</em><br/>
Location: <strong>%(location)s</strong><br/>
Date: <em>%(when)s</em><br/>
Time: %(time)s<br/>
</ul></p>
''',
'event': {
'title' : u'Your participation to the following event: "%(event)s"',
'email' : {
'yes' : u'''
Herewith your participation to "%(event)s" is *confirmed*!''',
'no' : u'''
Thank you for notifying us of your cancellation for "%(event)s".
You will be *excused*.''',
},
},
'meeting': {
'title' : u'%(meeting)s meeting',
'email' : {
'yes' : u'''
Herewith your participation to "%(meeting)s" is *confirmed*!''',
'no' : u'''
Thank you for notifying us of your cancellation for "%(meeting)s".
You will be *excused*.''',
},
},
}
|
[
"pst@libre.lu"
] |
pst@libre.lu
|
c5e49b4de1ac2ec9e848c99af5781b7e842799cf
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/RANet_ID0994_for_PyTorch/dataloader.py
|
8d73f52aa9fc6f2e781b3bfbb395740fb1153839
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,745
|
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import os
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
def get_dataloaders(args):
train_loader, val_loader, test_loader = None, None, None
if args.data == 'cifar10':
normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
std=[0.2471, 0.2435, 0.2616])
train_set = datasets.CIFAR10(args.data_root, train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]))
val_set = datasets.CIFAR10(args.data_root, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize
]))
elif args.data == 'cifar100':
normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
std=[0.2675, 0.2565, 0.2761])
train_set = datasets.CIFAR100(args.data_root, train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]))
val_set = datasets.CIFAR100(args.data_root, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize
]))
else:
# ImageNet
traindir = os.path.join(args.data_root, 'train')
valdir = os.path.join(args.data_root, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_set = datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]))
val_set = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
]))
if args.use_valid:
train_set_index = torch.randperm(len(train_set))
if os.path.exists(os.path.join(args.save, 'index.pth')):
print('!!!!!! Load train_set_index !!!!!!')
train_set_index = torch.load(os.path.join(args.save, 'index.pth'))
else:
print('!!!!!! Save train_set_index !!!!!!')
torch.save(train_set_index, os.path.join(args.save, 'index.pth'))
if args.data.startswith('cifar'):
num_sample_valid = 5000
else:
num_sample_valid = 50000
if 'train' in args.splits:
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
train_set_index[:-num_sample_valid]),
num_workers=args.workers, pin_memory=False)
if 'val' in args.splits:
val_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
train_set_index[-num_sample_valid:]),
num_workers=args.workers, pin_memory=False)
if 'test' in args.splits:
test_loader = torch.utils.data.DataLoader(
val_set,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
else:
if 'train' in args.splits:
train_loader = torch.utils.data.DataLoader(
train_set,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=False)
        if 'val' in args.splits or 'test' in args.splits:
val_loader = torch.utils.data.DataLoader(
val_set,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
test_loader = val_loader
return train_loader, val_loader, test_loader
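# Hypothetical usage sketch (attribute names taken from the accesses above;
# concrete values are assumptions):
#   args.data = 'cifar10'; args.data_root = './data'; args.save = './ckpt'
#   args.batch_size = 64; args.workers = 4; args.use_valid = False
#   args.splits = ['train', 'test']
#   train_loader, val_loader, test_loader = get_dataloaders(args)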
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
485f03dae69df4b6ada9fb3ae69c80c9a25a63be
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03018/s073874742.py
|
f981861b7dd86b3250675915160c0c2a28113c5c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
S = input()
rlt = 0
b = ''
tA = 0
A = 0
BC = 0
for s in S:
if s == 'A':
if b == 'A':
tA += 1
elif b == 'B':
rlt += A*BC
tA = 1
A = 0
BC = 0
else:
rlt += A*BC
tA = A + 1
A = 0
BC = 0
elif s == 'B':
if b == 'B':
rlt += A*BC
tA = 0
A = 0
BC = 0
elif s == 'C':
if b == 'B':
A += tA
tA = 0
BC += 1
else:
rlt += A*BC
tA = 0
A = 0
BC = 0
b = s
rlt += A*BC
print(rlt)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5086248faf63027b66d09017218bbf3019c4625f
|
881a76acaf0b6a26fd34548f0b1abbcf176a37b2
|
/ezgal/__init__.py
|
70fdcaa0ab7a97b7b8f4f8afe7ec51d6a94d571a
|
[
"MIT"
] |
permissive
|
gsnyder206/ezgal
|
930ed644b00332e3fc1f733a32afc72d511b7cb0
|
f10e57021ca88c7139a28b058b8716c5507ce48c
|
refs/heads/master
| 2021-01-11T11:17:09.551803
| 2017-01-11T16:44:50
| 2017-01-11T16:44:50
| 78,657,426
| 0
| 0
| null | 2017-01-11T16:28:50
| 2017-01-11T16:28:50
| null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
import ezgal,utils,astro_filter,ezgal_light,wrapper,sfhs,weight,dusts
__all__ = ["model", "utils", "wrapper", "sfhs", "weight"]
__author__ = 'Conor Mancone, Anthony Gonzalez'
__email__ = 'cmancone@gmail.com'
__ver__ = '2.0'
ezgal = ezgal.ezgal
model = ezgal
astro_filter = astro_filter.astro_filter
ezgal_light = ezgal_light.ezgal_light
wrapper = wrapper.wrapper
weight = weight.weight
def interpolate( values, xs, models=None, key=None, return_wrapper=False ):
""" models = ezgal.interpolate( values, xs, models, return_wrapper=False )
or
models = ezgal.interpolate( values, models, key=meta_key, return_wrapper=False )
Interpolate between EzGal models and return new models.
`models` is a list of EzGal model objects or filenames of EzGal compatible files.
`xs` is the values of the models to be interpolated between and `values` is a list
of values for the new models to be interpolated at.
Alternatively you can ignore xs and specify the name of a meta key
to use to build the interpolation grid.
Returns a list of EzGal model objects or a single EzGal model if a scalar is passed
for `values`. Alternatively, set return_wrapper=True and it will return an ezgal wrapper
object containing the fitted models objects.
All model SEDs must have the same age/wavelength grid. """
# what calling sequence was used?
if models is None and key is not None:
return wrapper( xs ).interpolate( key, values, return_wrapper=return_wrapper )
# make sure we have everything we need...
    if len( models ) != len( xs ): raise ValueError( 'xs list has a different length than models list!' )
# return interpolated models
return wrapper( models, extra_data=xs, extra_name='interp' ).interpolate( 'interp', values, return_wrapper=return_wrapper )
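# Illustrative calling sequences (model file names and the meta key are
# hypothetical):
#   models = interpolate( [1.5, 2.5], [1.0, 2.0, 3.0],
#                         ['m1.model', 'm2.model', 'm3.model'] )
#   models = interpolate( [0.02], ['m1.model', 'm2.model'], key='met' )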
|
[
"cmancone@gmail.com"
] |
cmancone@gmail.com
|
2929024e5b2b882ef9940eb706da9e2ad2f7e2c7
|
482297526ed7eedc7c3600a4b98c45e775065852
|
/emsapi/models/adi_ems_web_shared_tableau_rest_view_py3.py
|
9815d11e7c037465cd2c29f30ceddec172074ad0
|
[
"MIT"
] |
permissive
|
ge-flight-analytics/emsapi-python
|
9c441095c0f9c9dc8a42ee918f830a5261f4f4d1
|
d3de16397c6f3a35c0965e4dfc76741d1379145e
|
refs/heads/master
| 2023-08-03T20:47:45.950550
| 2023-07-26T16:13:01
| 2023-07-26T16:13:01
| 233,153,982
| 0
| 1
|
MIT
| 2023-07-26T16:13:31
| 2020-01-11T00:38:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebSharedTableauRestView(Model):
"""AdiEmsWebSharedTableauRestView.
:param id:
:type id: str
:param name:
:type name: str
:param content_url:
:type content_url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
}
def __init__(self, *, id: str=None, name: str=None, content_url: str=None, **kwargs) -> None:
super(AdiEmsWebSharedTableauRestView, self).__init__(**kwargs)
self.id = id
self.name = name
self.content_url = content_url
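# Illustrative construction (field values are hypothetical):
#   view = AdiEmsWebSharedTableauRestView(id='42', name='Fleet Overview',
#                                         content_url='fleet-overview')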
|
[
"cody.owens@ge.com"
] |
cody.owens@ge.com
|
972e173dd684f111e265003215339e68adb7e6fb
|
cac36b279aa1ea889a5e3803efd615449c55922b
|
/src/012_costMinimize.py
|
2d75190ae9e5e749cffd703687dcfa87d42aa3ce
|
[] |
no_license
|
GunSik2/ml
|
6e16cbbe04406686e394f59c634ed105bf90f849
|
bdc7ad59c113e13eb13d01d05e30ec77cc96035f
|
refs/heads/master
| 2020-06-12T09:53:30.965606
| 2017-01-05T03:37:34
| 2017-01-05T03:37:34
| 75,591,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
import tensorflow as tf
# Single variable linear regression
# Hypothesis: H(x) = Wx + b
# Cost Fun.: cost(w,b) = 1/m * Sum(H(x) - y)^2
# Gradient descent: W := W - alpha * 1/m * Sum((W*x - y) * x)
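# Note: d(cost)/dW = 2/m * Sum((W*x - y) * x); the constant factor 2 is folded
# into the fixed step size (0.1) used in the manual update below, which is why
# it does not appear explicitly.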
# training data
x = [1., 2., 3., 4.]
y = [2., 4., 6., 8.]
# Initial value (w, b)
W = tf.Variable(tf.random_uniform([1], -10000., 10000.))
b = 0
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Hypothesis
hypothesis = W * X + b
# Cost function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Gradient descent // manual implementation
# (tf.mul was renamed tf.multiply in TensorFlow 1.0)
W1 = W - tf.multiply(0.1, tf.reduce_mean(tf.multiply(tf.multiply(W, X) - Y, X)))
update = W.assign(W1)
# launch
# before starting, initialize the variables
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# fit the line
for step in range(1000):
sess.run(update, feed_dict={X: x, Y: y})
print(step, sess.run(cost, feed_dict={X: x, Y: y}), sess.run(W))
# learns best fit is w: [2] b: [0]
print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5}))
|
[
"you@example.com"
] |
you@example.com
|
272780c50836d3f265770a3986bd720199357fd3
|
5337ddfe3adf3a044bae5cdd530e8836b9000db1
|
/tests/structshape_test.py
|
0766479528b059d1cc38582a4d7d205a6395bf01
|
[] |
no_license
|
404cafe/Swampy
|
ef1d3206e9fece098910fb41d542ac7195dde50a
|
82fdafb27e219d0544b74a745f516bfb2264fdaf
|
refs/heads/master
| 2020-12-01T06:27:08.869741
| 2020-03-04T08:41:59
| 2020-03-04T08:41:59
| 230,575,461
| 0
| 0
| null | 2019-12-28T07:30:18
| 2019-12-28T07:30:17
| null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
"""This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2011 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import unittest
from swampy.structshape import structshape
class Tests(unittest.TestCase):
def test_lumpy(self):
t = [1,2,3]
self.assertEqual(structshape(t), 'list of 3 int')
t2 = [[1,2], [3,4], [5,6]]
self.assertEqual(structshape(t2), 'list of 3 list of 2 int')
t3 = [1, 2, 3, 4.0, '5', '6', [7], [8], 9]
self.assertEqual(structshape(t3),
'list of (3 int, float, 2 str, 2 list of int, int)')
class Point:
"""trivial object type"""
t4 = [Point(), Point()]
self.assertEqual(structshape(t4), 'list of 2 Point')
s = set('abc')
self.assertEqual(structshape(s), 'set of 3 str')
lt = list(zip(t, s))
self.assertEqual(structshape(lt), 'list of 3 tuple of (int, str)')
d = dict(lt)
self.assertEqual(structshape(d), 'dict of 3 int->str')
it = iter('abc')
self.assertEqual(structshape(it), 'str_iterator of 3 str')
if __name__ == '__main__':
unittest.main()
|
[
"downey@allendowney.com"
] |
downey@allendowney.com
|
1e0051a965e57364e9feb7ed0b8838ad82f4619a
|
95978243568bee7c33a2d6c100f42e0c5a15c933
|
/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py
|
473b7c683407b03e7a405cbd4224e62ba2dda7be
|
[
"BSD-3-Clause"
] |
permissive
|
cclauss/coremltools
|
6cc68874c45ce1035b1b59417eacfdce738d725d
|
0c63b0aeb63acedce0d39446c19b80cc47d57a7a
|
refs/heads/master
| 2023-05-01T16:56:43.042564
| 2023-04-14T20:16:24
| 2023-04-14T20:16:24
| 106,254,817
| 0
| 1
| null | 2017-10-09T08:11:43
| 2017-10-09T08:11:43
| null |
UTF-8
|
Python
| false
| false
| 6,920
|
py
|
# Copyright (c) 2022, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.input_type import (InputSpec,
TensorInputType,
TupleInputType)
from coremltools.converters.mil.mil.operation import Operation
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
@register_op(opset_version=_IOS16_TARGET)
class reshape_like(Operation):
"""
Reshape a tensor to an output shape specified by some or all dimensions of a tuple of reference tensors ``ref_tensors``.
Parameters
----------
x: tensor<\*?, T> (Required)
* The input tensor to be reshaped.
ref_tensors: Tuple[tensor<\*?, R>] (Required)
* A tuple of tensors that define the output shape.
begins: Tuple[const<int32>] (Required)
* A tuple of integers specifying the begin index into the shape vector of the corresponding ``ref_tensor``.
ends: Tuple[const<int32>] (Required)
* A tuple of integers specifying the end index into the shape vector of the corresponding ``ref_tensor``.
end_masks: Tuple[const<bool>] (Required)
* If ``True``, select all axes from the begin index until the end of the corresponding ``ref_tensor``, as in
``ref_tensors[i].shape[begins[i]:]``.
Notes
-----
The output shape is computed as follows:
.. sourcecode:: python
output_shape = []
num_of_refs = len(begins)
for i in range(num_of_refs):
if end_masks[i]:
output_shape.append(ref_tensor_i.shape[begins[i]:])
else:
output_shape.append(ref_tensor_i.shape[begins[i]:ends[i]])
output_shape = np.concat(output_shape, axis=0)
The following is an example:
.. sourcecode:: python
ref_tensors=[tensor[2, 3, 4], tensor[1, 5, 6]]
begins=[0, 1]
ends=[2, 0]
end_masks=[False, True]
The output shape would be ``(2, 3, 5, 6)``.
Returns
-------
tensor<\*?, T>
* Same type as input tensor ``x``.
* Output shape is computed by ``ref_tensors``, ``begins``, ``ends``, and ``end_masks``.
Attributes
----------
T: fp16, fp32, i32, bool
R: fp16, fp32, i32, bool
"""
input_spec = InputSpec(
x=TensorInputType(type_domain="T"),
ref_tensors=TupleInputType(),
begins=TupleInputType(),
ends=TupleInputType(),
end_masks=TupleInputType(),
)
type_domains = {
"T": (types.fp16, types.fp32, types.int32, types.bool),
}
def _check_is_const_tuple_with_scalar(self, param, expected_type, param_name):
"""
This utility function checks the param is a Tuple of scalar with expected data type.
"""
for x in param:
if x.dtype != expected_type or x.shape != ():
msg = "In op reshape_like {}, {} must be a Tuple of scalar {}. Got a {} tensor with shape {}.".format(
self.name,
param_name,
expected_type.__type_info__(),
x.dtype.__type_info__(),
x.shape,
)
raise ValueError(msg)
def type_inference(self):
# Validation the inputs
ref_number = len(self.ref_tensors)
if len(self.begins) != ref_number or len(self.ends) != ref_number or len(self.end_masks) != ref_number:
msg = (
"Op reshape_like {}'s ref_tensors, begins, ends and end_masks must have exactly the same length. "
"Got {}, {}, {} and {}."
            ).format(self.name, ref_number, len(self.begins), len(self.ends), len(self.end_masks))
            raise ValueError(msg)
self._check_is_const_tuple_with_scalar(self.begins, types.int32, "begins")
self._check_is_const_tuple_with_scalar(self.ends, types.int32, "ends")
self._check_is_const_tuple_with_scalar(self.end_masks, types.bool, "end_masks")
# Compute the output shape
out_shape = ()
for ref_tensor, begin, end, end_mask in zip(self.ref_tensors, self.begins, self.ends, self.end_masks):
shape = ref_tensor.shape
begin, end, end_mask = begin.val, end.val, end_mask.val
ref_shape = shape[begin:end] if not end_mask else shape[begin:]
out_shape += tuple(ref_shape)
# Output shape must be known at compile time
if any_symbolic(out_shape):
msg = "Output shape of a reshape_like op {} must not be symbolic. Got {}".format(self.name, out_shape)
raise ValueError(msg)
# Output shape must be consistent with the input shape
if not any_symbolic(self.x.shape):
if np.prod(self.x.shape) != np.prod(out_shape):
msg = "At reshape_like op {}, input shape {} not consistent with the output shape {}.".format(
self.name,
self.x.shape,
out_shape
)
raise ValueError(msg)
return types.tensor(self.x.dtype, out_shape)
@register_op(opset_version=_IOS16_TARGET)
class pixel_unshuffle(Operation):
"""
Rearrange elements in a tensor from spatial dimensions into depth (channel).
It is basically the inverse operation of `pixel_shuffle <#coremltools.converters.mil.mil.ops.defs.iOS15.tensor_transformation.pixel_shuffle>`_.
Equivalent to PyTorch's ``PixelUnshuffle``.
Parameters
----------
x: tensor<[n, C, H / f , W / f], T> (Required)
* Input tensor of rank ``4``.
downscale_factor: const<i32>
* Factor to decrease spatial resolution by.
Returns
-------
tensor<[n, C * f^2, H, W], T>
* Where ``f`` is the downscale factor.
Attributes
----------
T: fp16, fp32
References
----------
`torch.nn.PixelUnshuffle <https://pytorch.org/docs/stable/generated/torch.nn.PixelUnshuffle.html>`_
"""
input_spec = InputSpec(
x=TensorInputType(type_domain="T"),
downscale_factor=TensorInputType(const=True, type_domain=types.uint32),
)
type_domains = {
"T": (types.fp16, types.fp32),
}
def type_inference(self):
x_type = self.x.dtype
n, c, h, w = self.x.shape
f = self.downscale_factor.val
ret_shape = (n, c * f * f, h / f, w / f)
return types.tensor(x_type, ret_shape)
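# Shape arithmetic example for pixel_unshuffle (illustrative): an input of
# shape (1, 3, 8, 8) with downscale_factor=2 yields (1, 12, 4, 4) -- the
# spatial area moves into the channel dimension.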
|
[
"noreply@github.com"
] |
cclauss.noreply@github.com
|
729cda05f7cf671ee84fa46f47321cf0aba2496a
|
b1c97831338b0c2f8099a56f23bddf394561e0e3
|
/Chapter 05/queueJoin.py
|
440160eac7fb2079148b0631868f55e20a0c9aca
|
[
"MIT"
] |
permissive
|
PacktPublishing/Learning-Concurrency-in-Python
|
03b242f64a0b6515a41ceccab86936dc54b20e15
|
bafc928ce9edc601e3def4441b51555ede13c973
|
refs/heads/master
| 2023-02-06T00:51:37.297067
| 2023-01-30T08:05:25
| 2023-01-30T08:05:25
| 100,243,064
| 77
| 51
|
MIT
| 2018-10-26T12:14:45
| 2017-08-14T08:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 530
|
py
|
import threading
import queue
import time
def mySubscriber(queue):
time.sleep(1)
while not queue.empty():
item = queue.get()
if item is None:
break
print("{} removed {} from the queue".format(threading.current_thread(), item))
queue.task_done()
myQueue = queue.Queue()
for i in range(5):
myQueue.put(i)
print("Queue Populated")
thread = threading.Thread(target=mySubscriber, args=(myQueue,))
thread.start()
print("Not Progressing Till Queue is Empty")
myQueue.join()
print("Queue is now empty")
|
[
"ketank@packtpub.com"
] |
ketank@packtpub.com
|
bb8ab784baecc945d7331bba2ea31191ffddd9fb
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/f_net/configs/classification.py
|
2d1e0d6deb8ebaa8cff5ae3928c884995cda95e1
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434
| 2021-09-30T20:59:01
| 2021-09-30T21:07:02
| 156,725,548
| 1
| 0
|
Apache-2.0
| 2018-11-08T15:13:53
| 2018-11-08T15:13:52
| null |
UTF-8
|
Python
| false
| false
| 2,352
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for fine-tuning on the GLUE and SuperGLUE benchmarks."""
from f_net.configs import base as base_config
from f_net.configs.base import ModelArchitecture
from f_net.configs.base import TrainingMode
def get_config():
"""Config for fine-tuning (classification)."""
config = base_config.get_config()
# Determines which model to use.
config.model_arch: ModelArchitecture = ModelArchitecture.F_NET
config.mode: TrainingMode = TrainingMode.CLASSIFICATION
  # The dataset name takes the form "glue/DS_g", where DS_g is one of the
  # following: [cola, sst2, mrpc, qqp, stsb, mnli, qnli, rte, wnli].
config.dataset_name: str = "glue/rte"
# How often to save the model checkpoint.
config.save_checkpoints_steps: int = 200
# Training metrics will be computed (1 / eval_proportion) times during
# training at regularly spaced intervals, regardless of dataset size.
config.eval_proportion: float = 0.05
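  # For example, eval_proportion = 0.05 means metrics are computed
  # 1 / 0.05 = 20 times over the course of training.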
# Total batch size for training.
config.train_batch_size: int = 64
# Total batch size for eval (and predictions).
config.eval_batch_size: int = 32
# The base learning rate for Adam.
config.learning_rate: float = 1e-5
# Total number of training epochs to perform.
config.num_train_epochs: float = 3
# Proportion of training to perform linear learning rate warmup for.
# E.g., 0.1 = 10% of training steps.
config.warmup_proportion: float = 0.1
  # Maximum number of eval steps on the validation split. The actual number of
  # steps may be less for small eval datasets.
config.max_num_eval_steps: int = int(1e5)
# Initial checkpoint directory or filepath (usually from a pre-trained model).
config.init_checkpoint_dir: str = ""
# Dummy attribute for repeated runs.
config.trial: int = 0
return config
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
09d1805c4d1b7e98881a9a7b658b3a20a6a61e32
|
2c07ae4239d217f4a4b3d356ca4be31629dea4d5
|
/assets.py
|
2c3cc15e2b9d049fc77ed0707642012353ae75fc
|
[] |
no_license
|
themylogin/thelogin.ru
|
771afe0e3afbdc1072695fb2d4920f6ec3b7c6d3
|
1f66ff940dfafe6d065c63e832d51b5e16522edc
|
refs/heads/master
| 2023-04-25T22:02:56.004822
| 2020-03-22T18:02:40
| 2020-03-22T18:02:40
| 7,691,963
| 0
| 0
| null | 2023-04-15T01:17:22
| 2013-01-18T18:31:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cssmin
import glob
import jsmin
import os
from config import config
assets = {}
assets_dir = os.path.join(config.path, "asset")
for asset, function, separator in [("css", cssmin.cssmin, ""), ("js", jsmin.jsmin, ";")]:
asset_dir = os.path.join(assets_dir, asset)
asset_list = filter(lambda filename: filename.endswith("." + asset) and not filename.startswith("packed-"), sorted(os.listdir(asset_dir)))
if config.debug:
assets[asset] = asset_list
else:
asset_time = int(max([os.stat(os.path.join(asset_dir, filename)).st_mtime for filename in asset_list]))
asset_packed = "packed-%d.%s" % (asset_time, asset)
asset_packed_path = os.path.join(asset_dir, asset_packed)
if not os.path.exists(asset_packed_path):
map(os.unlink, glob.glob(os.path.join(asset_dir, "packed-*")))
open(asset_packed_path, "w").write(separator.join([
(function if function else lambda x: x)(open(os.path.join(asset_dir, filename)).read())
for filename in asset_list
]))
assets[asset] = [asset_packed]
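# The packed filename embeds the newest source mtime ("packed-<time>.<ext>"),
# so any asset edit yields a new filename and busts browser caches.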
|
[
"themylogin@gmail.com"
] |
themylogin@gmail.com
|
a6c00c649c8adf9f2fd19814f9961b8051ffa0c4
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/dccf58efe393b6912faf2e89cf7e87942e28273e-<_connect>-fix.py
|
166d99bac0993991bbe9ef5d461007d89ccd7f3a
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
def _connect(self):
if (not HAS_WINRM):
raise AnsibleError(('winrm or requests is not installed: %s' % to_text(WINRM_IMPORT_ERR)))
elif (not HAS_XMLTODICT):
raise AnsibleError(('xmltodict is not installed: %s' % to_text(XMLTODICT_IMPORT_ERR)))
super(Connection, self)._connect()
if (not self.protocol):
self.protocol = self._winrm_connect()
self._connected = True
return self
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
448bcd261807fa05a45781f836527a3b1d6f27f8
|
8d2e5b5ea408579faa699c09bdbea39e864cdee1
|
/ufora/util/ThreadSafeDict.py
|
75509f50e6a673f9c5cf633da81f7f424242c589
|
[
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
iantuioti/ufora
|
2218ef4c7e33c171268ce11458e9335be7421943
|
04db96ab049b8499d6d6526445f4f9857f1b6c7e
|
refs/heads/master
| 2021-01-17T17:08:39.228987
| 2017-01-30T16:00:45
| 2017-01-30T16:00:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import threading
import thread
class ThreadSafeDict:
def __init__(self, vals = dict()):
self.vals = dict(vals)
self.lock = threading.RLock()
def __str__(self):
return self.vals.__str__()
def __getitem__(self, index):
with self.lock:
return self.vals[index]
def __setitem__(self, index, val):
with self.lock:
self.vals[index] = val
def updateItem(self, index, f, noneisdel = True):
with self.lock:
self.vals[index] = f(self.vals[index] if self.vals.has_key(index) else None)
if noneisdel and self.vals[index] is None:
del self.vals[index]
def update(self, foreignDict):
with self.lock:
self.vals.update(foreignDict)
def has_key(self, index):
with self.lock:
return self.vals.has_key(index)
def keys(self):
with self.lock:
return list(self.vals.keys())
def __delitem__(self, key):
with self.lock:
del self.vals[key]
def __len__(self):
with self.lock:
return len(self.vals)
def scan(self,f):
with self.lock:
for k in self.vals:
f(k, self.vals[k])
def dictCopy(self):
with self.lock:
return dict(self.vals)
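# Minimal usage sketch (illustrative):
#   d = ThreadSafeDict()
#   d.updateItem('hits', lambda v: (v or 0) + 1)  # atomic read-modify-write
#   d.updateItem('hits', lambda v: None)          # returning None deletes the key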
|
[
"braxton.mckee@gmail.com"
] |
braxton.mckee@gmail.com
|
e4cb407440b02134b7fb10f738ba19ed0698aa2f
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/pyimgalgos/tags/V00-00-13/src/image_crop.py
|
3c24a7427ffe5f080f718d92b120ac8d33ba992e
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,994
|
py
|
#--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Pyana/psana user analysis module image_crop...
#
#------------------------------------------------------------------------
"""User analysis module for pyana and psana frameworks.
This software was developed for the LCLS project. If you use all or
part of it, please give an appropriate acknowledgment.
@version $Id: template!pyana-module!py 2987 2012-02-25 03:28:58Z salnikov@SLAC.STANFORD.EDU $
@author Mikhail S. Dubrovin
"""
#------------------------------
# Module's version from SVN --
#------------------------------
__version__ = "$Revision: 2987 $"
# $Source$
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import logging
#-----------------------------
# Imports for other modules --
#-----------------------------
#from psana import *
import numpy as np
class image_crop (object) :
"""Get image from the evt store crop it and put it back in the evt store"""
def __init__ ( self ) :
"""Class constructor.
Parameters are passed from pyana.cfg configuration file.
All parameters are passed as strings
@param source string, address of Detector.Id:Device.ID
@param key_in string, keyword for input image 2-d array
@param key_out string, unique keyword for output image array
        @param rowmin      int, row minimal to crop image (default = 0 - for full size)
        @param rowmax      int, row maximal to crop image (default = -1 - for full size)
        @param colmin      int, column minimal to crop image (default = 0 - for full size)
        @param colmax      int, column maximal to crop image (default = -1 - for full size)
@param print_bits int, bit-word for verbosity control
"""
self.m_src = self.configSrc ('source', '*-*|Cspad-*')
self.m_key_in = self.configStr ('key_in', 'image_in')
self.m_key_out = self.configStr ('key_out', 'image_out')
self.rowmin = self.configInt ('rowmin', 0)
self.rowmax = self.configInt ('rowmax', -1)
self.colmin = self.configInt ('colmin', 0)
self.colmax = self.configInt ('colmax', -1)
self.m_print_bits = self.configInt ('print_bits', 1)
self.counter = 0
if self.m_print_bits & 1 : self.print_input_pars()
def beginjob( self, evt, env ) : pass
def beginrun( self, evt, env ) :
self.run = evt.run()
self.exp = env.experiment()
self.evnum = 0
def begincalibcycle( self, evt, env ) : pass
def event( self, evt, env ) :
"""This method is called for every L1Accept transition.
@param evt event data object
@param env environment object
"""
        # Should work for both pyana and pythonic-psana (as compatibility method):
#print '\nimage_crop: evt.keys():', evt.keys()
if env.fwkName() == "psana":
#self.arr = evt.get(np.ndarray, self.m_key_in)
self.arr = evt.get(np.ndarray, self.m_src, self.m_key_in)
else :
self.arr = evt.get(self.m_key_in)
self.counter +=1
if self.arr is None :
#if self.m_print_bits & 32 :
msg = __name__ + ': WARNING! CSPAD array object %s is not found in evt' % self.m_key_in
#logging.info( msg )
print msg
return
if self.m_print_bits & 2 and self.counter == 1 :
self.print_image_parameters()
self.img2d = np.array(self.arr[self.rowmin:self.rowmax, self.colmin:self.colmax])
#self.img2d = self.arr
#evt.put( self.img2d, self.m_key_out ) # save image in event as 2d numpy array
evt.put( self.img2d, self.m_src, self.m_key_out ) # save image in event as 2d numpy array
def endcalibcycle( self, evt, env ) : pass
def endrun ( self, evt, env ) : pass
def endjob ( self, evt, env ) : pass
#-----------------------------
def print_input_pars( self ) :
msg = '\n%s: List of input parameters\n source %s\n key_in %s\n key_out %s\n print_bits: %4d' % \
(__name__ , self.m_src, self.m_key_in, self.m_key_out, self.m_print_bits) + \
'\n rowmin %s\n rowmax %s\n colmin %s\n colmax %s\n' % \
(self.rowmin, self.rowmax, self.colmin, self.colmax)
#logging.info( msg )
print msg
def print_image_parameters( self ) :
msg = '%s: Input image parameters for run = %s:\n' % (__name__, self.run) \
+ ' shape = %s' % str(self.arr.shape) \
+ ' dtype = %s' % str(self.arr.dtype)
# + '\narray:\n' + str(self.arr)
#logging.info( msg )
print msg
#-----------------------------
#-----------------------------
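# Illustrative pyana.cfg fragment for this module (the section name, detector
# address, and crop bounds are assumptions, not from the original source):
#   [pyimgalgos.image_crop]
#   source     = CxiDs1.0:Cspad.0
#   key_in     = image_in
#   key_out    = image_out
#   rowmin     = 100
#   rowmax     = 400
#   print_bits = 1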
|
[
"dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
7fef47453d50a19daade3ccb1cac7c5cf2db9935
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/-1317042838/_codecs.py
|
19d1dd99bb98e64143e5afef560b11128f7229a6
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273
| 2019-10-01T13:56:27
| 2019-10-01T13:56:27
| 200,369,900
| 0
| 0
| null | 2019-08-03T12:56:33
| 2019-08-03T11:53:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 8,128
|
py
|
# encoding: utf-8
# module _codecs
# from (built-in)
# by generator 1.147
# no doc
# no imports
# functions
def ascii_decode(*args, **kwargs): # real signature unknown
pass
def ascii_encode(*args, **kwargs): # real signature unknown
pass
def charmap_build(*args, **kwargs): # real signature unknown
pass
def charmap_decode(*args, **kwargs): # real signature unknown
pass
def charmap_encode(*args, **kwargs): # real signature unknown
pass
def decode(*args, **kwargs): # real signature unknown
"""
Decodes obj using the codec registered for encoding.
Default encoding is 'utf-8'. errors may be given to set a
different error handling scheme. Default is 'strict' meaning that encoding
errors raise a ValueError. Other possible values are 'ignore', 'replace'
and 'backslashreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
pass
def encode(*args, **kwargs): # real signature unknown
"""
Encodes obj using the codec registered for encoding.
The default encoding is 'utf-8'. errors may be given to set a
different error handling scheme. Default is 'strict' meaning that encoding
errors raise a ValueError. Other possible values are 'ignore', 'replace'
and 'backslashreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
pass
def escape_decode(*args, **kwargs): # real signature unknown
pass
def escape_encode(*args, **kwargs): # real signature unknown
pass
def latin_1_decode(*args, **kwargs): # real signature unknown
pass
def latin_1_encode(*args, **kwargs): # real signature unknown
pass
def lookup(*args, **kwargs): # real signature unknown
""" Looks up a codec tuple in the Python codec registry and returns a CodecInfo object. """
pass
def lookup_error(errors): # real signature unknown; restored from __doc__
"""
lookup_error(errors) -> handler
Return the error handler for the specified error handling name or raise a
LookupError, if no handler exists under this name.
"""
pass
def raw_unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def raw_unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def readbuffer_encode(*args, **kwargs): # real signature unknown
pass
def register(*args, **kwargs): # real signature unknown
"""
Register a codec search function.
Search functions are expected to take one argument, the encoding name in
all lower case letters, and either return None, or a tuple of functions
(encoder, decoder, stream_reader, stream_writer) (or a CodecInfo object).
"""
pass
def register_error(*args, **kwargs): # real signature unknown
"""
Register the specified error handler under the name errors.
handler must be a callable object, that will be called with an exception
instance containing information about the location of the encoding/decoding
error and must return a (replacement, new position) tuple.
"""
pass
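# Sketch of a handler satisfying the contract above (hypothetical name,
# not part of the generated stub):
# def qmark_handler(exc):
#     return ('?', exc.end)  # (replacement, position to resume encoding at)
# register_error('qmark', qmark_handler)
# encode('héllo', 'ascii', 'qmark')  # -> b'h?llo'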
def unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_decode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_7_decode(*args, **kwargs): # real signature unknown
pass
def utf_7_encode(*args, **kwargs): # real signature unknown
pass
def utf_8_decode(*args, **kwargs): # real signature unknown
pass
def utf_8_encode(*args, **kwargs): # real signature unknown
pass
def _forget_codec(*args, **kwargs): # real signature unknown
""" Purge the named codec from the internal codec lookup cache """
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7f3ceeae5048>, 'find_spec': <classmethod object at 0x7f3ceeae5080>, 'find_module': <classmethod object at 0x7f3ceeae50b8>, 'create_module': <classmethod object at 0x7f3ceeae50f0>, 'exec_module': <classmethod object at 0x7f3ceeae5128>, 'get_code': <classmethod object at 0x7f3ceeae5198>, 'get_source': <classmethod object at 0x7f3ceeae5208>, 'is_package': <classmethod object at 0x7f3ceeae5278>, 'load_module': <classmethod object at 0x7f3ceeae52b0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='_codecs', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
|
[
"sarthakpatidar15@gmail.com"
] |
sarthakpatidar15@gmail.com
|
419ece67d00ba37f7460c57ca43884e4ca9e0b2f
|
dafd25bce1c1fe6c667119e7a541b3cdb44b6181
|
/youtube_tranding/auto.py
|
454beae653fc6832b41abb2c345f1805d9012b0e
|
[] |
no_license
|
armannurhidayat/python_scraper
|
23fd84dfb7263b7e23a96bb72ee8b7ce68cb52df
|
5b0b56247e753f190a9dfdd0bbc150f84d1609f9
|
refs/heads/master
| 2023-02-05T00:00:48.380383
| 2020-12-27T02:28:29
| 2020-12-27T02:28:29
| 250,330,177
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
import mysql.connector
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
config = {
'user' : 'root',
'password': 'root',
'host' : 'localhost',
'port' : 8889,
'database': 'yt_trending',
'raise_on_warnings': True
}
url = 'https://www.youtube.com/feed/trending?gl=ID&hl=id'
path_driver = "/Users/armannurhidayat/Desktop/selenium/driver/chromedriver"
chrome_options = Options()
chrome_options.add_argument("--incognito")
driver = webdriver.Chrome(executable_path=path_driver, options=chrome_options)
driver.get(url)
video_title = driver.find_elements_by_xpath('//*[@id="video-title"]')
for rec in video_title:
title = rec.text
link = rec.get_attribute('href')
# Insert row; a parameterized query avoids quoting/SQL-injection problems
print('{} - {}'.format(title, link))
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor(dictionary=True)
cursor.execute("INSERT INTO `trand` (`title`, `url`) VALUES (%s, %s)", (title, link))
cnx.commit()
cursor.close()
cnx.close()
time.sleep(2)
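# Cleanup sketch (an assumption, not in the original script): release the
# browser once the loop finishes.
driver.quit()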
|
[
"armannurhidayat7@gmail.com"
] |
armannurhidayat7@gmail.com
|
de900b93a39989817a16977f10cddadf7b234e32
|
06292f96cba132ca57777672a447cfff7c5abee6
|
/Asm1/ThamKhao/testPhuc/ParserSuite.py
|
940fca2dcbd72c810ca6309115c038b9b10115ef
|
[] |
no_license
|
kietteik/ppl
|
1746440b12affe71e67d6f958922b32b1fdaab5c
|
2ee60582e81595b8d8b5d0f8212d20151cfe9264
|
refs/heads/master
| 2023-03-01T00:24:36.969189
| 2021-01-31T05:15:13
| 2021-01-31T05:15:13
| 305,802,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,965
|
py
|
import unittest
from TestUtils import TestParser
class ParserSuite(unittest.TestCase):
def test_var_dec_1(self):
input = """Var: a = 5;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,201))
def test_var_dec_2(self):
input = """Var: ;"""
expect = "Error on line 1 col 5: ;"
self.assertTrue(TestParser.checkParser(input,expect,202))
def test_var_dec_3(self):
input = """Var: b[2][3] = {{2,3,4},{4,5,6}};"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,203))
def test_var_dec_4(self):
input = """Var: c, d = 6, e, f;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,204))
def test_var_dec_5(self):
input = """Var: m, n[10];"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,205))
def test_var_dec_6(self):
input = """Var: n[10], m;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,206))
def test_assignment_stm_1(self):
input = """a[3 + foo(2)] = a[b [2][3]] + 4;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,207))
def test_assignment_stm_2(self):
input = """v = (4. \. 3.) *. 3.14 *. r *. r *. r;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,208))
def test_callee_stm_1(self):
input = """foo(a[1][2] + 2, x + 1);"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,209))
def test_callee_stm_2(self):
input = """x = foo(a[1][2] + 2, x + 1);"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,210))
def test_function_1(self):
input = """Function: foo
Parameter: a[5], b
Body:
Var: i = 0;
While (i < 5) Do
a[i] = b +. 1.0;
i = i + 1;
EndWhile.
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,211))
def test_for_1(self):
input = """For (i = 0, i < 10, 2) Do
writeln(i);
EndFor."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,212))
def test_var_dec_7(self):
input = """Var: a = 5;
Var: b[2][3] = {{2,3,4},{4,5,6}};
Var: c, d = 6, e, f;
Var: m, n[10];"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,213))
def test_var_dec_func_dec_1(self):
input = """Var: x;
Function: fact
Parameter: n
Body:
If n == 0 Then
Return 1;
Else
Return n * fact (n - 1);
EndIf.
EndBody.
Function: main
Body:
x = 10;
fact (x);
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,214))
def test_var_dec_8(self):
input = """Var: a[5] = {1,4,3,2,0};
Var: b[2][3]={{1,2,3},{4,5,6}};"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,215))
def test_var_dec_func_dec_2(self):
input = """Var: x;
Function: fact
Parameter: n
Body:
If n == 0 Then
Return 1;
Else
Return n * fact (n - 1);
EndIf.
EndBody.
Function: main
Body:
x = 10;
fact (x);
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,216))
def test_callee_stm_3(self):
input = """foo (2 + x, 4. \. y);
goo ();"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,217))
def test_type_coercions_1(self):
input = """If bool_of_string ("True") Then
a = int_of_string (read ());
b = float_of_int (a) +. 2.0;
EndIf."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,218))
def test_var_dec_func_dec_3(self):
input = """Function: test
Parameter: n
Body:
If n > 10 Then
Return 5;
Else
Return False;
EndIf.
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,219))
def test_var_dec_9(self):
input = """a = 1;
b[2][3] = 5;
c[2] = {{1,3},{1,5,7}};"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,220))
def test_var_dec_func_dec_4(self):
input = """Function: test
Parameter: n
Body:
If n > 10 Then
Return 5;
Else
Return a[4][5 + b[2][3]];
EndIf.
EndBody."""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,221))
def test_var_dec_10(self):
input = """Var: a = "Xin chao moi nguoi!";
Var: b = 5, c = False;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,222))
|
[
"kietteikdoi@gmail.com"
] |
kietteikdoi@gmail.com
|
7c7c20cdfdc1ad53c73c20ab931e5efcff7528aa
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r9/Gen/DecFiles/options/15174011.py
|
d436adf8e9635a383cb4126186976d45e43be6ef
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/15174011.py generated: Fri, 27 Mar 2015 16:10:06
#
# Event Type: 15174011
#
# ASCII decay Descriptor: [Lambda_b0 -> D+(-> K- pi+ pi+) mu-]cc
#
from Configurables import Generation
Generation().EventType = 15174011
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Lb_D+mu,Kpipi=phsp,DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
Generation().SignalPlain.SignalPIDList = [ 5122,-5122 ]
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
6dae49d404619d13917ca5e5dd08fa811d287aad
|
37db56765276c0835a2c7e3955c412ce204836c1
|
/241.py
|
19a44d3fb46bd049d5ae60b83250f8d7f974f167
|
[] |
no_license
|
supperllx/LeetCode
|
9d0a3a7258d1cff6afa6e77f61a2e697834914ca
|
df3a589ea858218f689fe315d134adc957c3debd
|
refs/heads/master
| 2023-05-01T06:57:17.403568
| 2021-05-19T18:29:25
| 2021-05-19T18:34:03
| 288,351,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
import functools
import operator
from typing import List
class Solution:
@functools.cache
def diffWaysToCompute(self, input: str) -> List[int]:
if input.isnumeric():
return [int(input)]
else:
ops = {'+': operator.add, '-': operator.sub, '*': operator.mul}
res = []
indexes = []
for i, ch in enumerate(input):
if ch in ops:
indexes.append(i)
for i in indexes:
for left in self.diffWaysToCompute(input[:i]):
for right in self.diffWaysToCompute(input[i+1:]):
res.append(ops[input[i]](left, right))
return list(res)
|
[
"supperllx@outlook.com"
] |
supperllx@outlook.com
|
ab05aff12541f245de8fe7bd4437943a46a928ec
|
77166c6ed9b872fa69b454d3682f63527f5f3951
|
/tests/unit2/test_load_textures.py
|
9f74bd64ebe79c8225596d0f8f2a84e1267ace5a
|
[
"MIT"
] |
permissive
|
biggzlar/arcade
|
d72d936f3c244a9d5173b6f36bca3ede3382a0ae
|
fc444db356452660ac6cb2ffe241f0b1a3d4bcf3
|
refs/heads/master
| 2020-12-14T06:30:18.997456
| 2020-01-18T04:44:03
| 2020-01-18T04:44:03
| 234,668,560
| 1
| 0
|
NOASSERTION
| 2020-01-18T02:07:41
| 2020-01-18T02:07:40
| null |
UTF-8
|
Python
| false
| false
| 4,737
|
py
|
import os
import arcade
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
LINE_HEIGHT = 20
CHARACTER_SCALING = 0.5
COIN_SCALE = 0.25
class MyTestWindow(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
self.frame_count = 0
arcade.set_background_color(arcade.color.AMAZON)
self.character_list = arcade.SpriteList()
self.player = arcade.AnimatedWalkingSprite()
self.player.stand_right_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_idle.png")]
self.player.stand_left_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_idle.png", mirrored=True)]
self.player.walk_right_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_walk0.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk1.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk2.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk3.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk4.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk5.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk6.png"),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk7.png")]
self.player.walk_left_textures = [arcade.load_texture(":resources:images/animated_characters/robot/robot_walk0.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk1.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk2.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk3.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk4.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk5.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk6.png", mirrored=True),
arcade.load_texture(":resources:images/animated_characters/robot/robot_walk7.png", mirrored=True)]
self.player.texture_change_distance = 20
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player.scale = 0.8
self.player.change_x = 2
self.player.texture = self.player.stand_left_textures[0]
self.character_list.append(self.player)
self.coin_list = arcade.SpriteList()
coin = arcade.AnimatedTimeSprite(scale=0.5)
coin.center_x = 500
coin.center_y = 500
coin.textures = []
coin.textures.append(arcade.load_texture(":resources:images/items/gold_1.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_2.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_3.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_4.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_3.png", scale=COIN_SCALE))
coin.textures.append(arcade.load_texture(":resources:images/items/gold_2.png", scale=COIN_SCALE))
coin.set_texture(0)
self.coin_list.append(coin)
def on_draw(self):
arcade.start_render()
self.coin_list.draw()
self.character_list.draw()
def update(self, delta_time):
self.frame_count += 1
if self.frame_count == 70:
self.player.change_x *= -1
self.coin_list.update()
self.coin_list.update_animation(delta_time)
self.character_list.update()
self.character_list.update_animation(delta_time)
def test_sprite():
window = MyTestWindow(SCREEN_WIDTH, SCREEN_HEIGHT, "Test Animation")
window.test(90)
window.close()
|
[
"paul@cravenfamily.com"
] |
paul@cravenfamily.com
|
ca40a4e3bed0d8bd97234170a3f8ec82dfa77d02
|
4b44a299bafbd4ca408ce1c89c9fe4a449632783
|
/python3/14_Code_Quality/04_mocking/example_2/test_calculator.py
|
773fb6d377820f066c3352f19151f1de0c8dade3
|
[] |
no_license
|
umunusb1/PythonMaterial
|
ecd33d32b2de664eaaae5192be7c3f6d6bef1d67
|
1e0785c55ccb8f5b9df1978e1773365a29479ce0
|
refs/heads/master
| 2023-01-23T23:39:35.797800
| 2020-12-02T19:29:00
| 2020-12-02T19:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
from unittest import TestCase
from main import Calculator
class TestCalculator(TestCase):
def setUp(self):
self.calc = Calculator()
def test_sum(self):
answer = self.calc.sum(2, 4)
self.assertEqual(answer, 6)
|
[
"uday3prakash@gmail.com"
] |
uday3prakash@gmail.com
|
1965d29d375c6499f282d0e556160f23324bf4c5
|
bec623f2fab5bafc95eb5bd95e7527e06f6eeafe
|
/django-gc-shared/visitor_analytics/migrations/0001_initial.py
|
8f99c744d9c12c4a97d23a04880458deedb220fe
|
[] |
no_license
|
riyanhax/a-demo
|
d714735a8b59eceeb9cd59f788a008bfb4861790
|
302324dccc135f55d92fb705c58314c55fed22aa
|
refs/heads/master
| 2022-01-21T07:24:56.468973
| 2017-10-12T13:48:55
| 2017-10-12T13:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UtmAnalytics',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('utm_source', models.CharField(default=b'', max_length=256)),
('utm_medium', models.CharField(default=b'', max_length=256)),
('utm_campaign', models.CharField(default=b'', max_length=256)),
('utm_timestamp', models.DateTimeField(null=True)),
('referrer', models.CharField(default=b'', max_length=2048)),
('referrer_timestamp', models.DateTimeField(null=True)),
('agent_code_timestamp', models.DateTimeField(null=True)),
('registration_page', models.CharField(default=b'', max_length=4096)),
('user', models.OneToOneField(related_name='utm_analytics', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"ibalyko@ubuntu-server-16-04"
] |
ibalyko@ubuntu-server-16-04
|
c51fe65b0eea696958298037853329c6a2320d97
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/62/usersdata/172/29550/submittedfiles/ex1.py
|
c6a1b4c62ab0c54ae5cb58541624113d1e9cb84a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
a = float(input('Digite a: '))
b = float(input('Digite b: '))
c = float(input('Digite c: '))
#START FROM HERE!
d = b*b - 4*a*c
print(d)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7d24a6875659d927451451cfe5c01e242aee20e7
|
417ab6024a95e97b4d2236c67e28d00e6d1defc0
|
/python/fetch/mayi01/img.py
|
589afd6265b67e6b4c17fef51b293b349378b05d
|
[] |
no_license
|
zeus911/myconf
|
11139069948f7c46f760ca0a8f1bd84df5ec4275
|
6dc7a6761ab820d6e97a33a55a8963f7835dbf34
|
refs/heads/master
| 2020-04-18T02:16:09.560219
| 2019-01-22T18:15:08
| 2019-01-22T18:15:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,826
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import threading
from common import common
from baseparse import *
from common import db_ops
from common.envmod import *
from common import dateutil
from fetch.profile import *
global baseurl
class ImgParse(BaseParse):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
objs = self.parseChannel()
dbVPN = db.DbVPN()
ops = db_ops.DbOps(dbVPN)
for obj in objs:
ops.inertImgChannel(obj)
dbVPN.commit()
for obj in objs:
for i in range(1, maxImgPage):
url = obj['url']
if i!=1:
url= "%s%s%s"%(url.replace("0.html",""),i,".html")
print url
count = self.update(url, ops, obj['url'], i)
dbVPN.commit()
if count == 0:
break
def parseChannel(self):
ahrefs = self.header("header3.html")
objs = []
for ahref in ahrefs:
obj = {}
obj['name']= ahref.text
obj['url']=ahref.get("href")
obj['baseurl'] = baseurlImg
obj['updateTime'] = datetime.datetime.now()
obj['rate'] = 1.1
obj['showType'] = 3
obj['channel'] = 'porn_sex'
objs.append(obj)
return objs
def update(self, url, ops, channel, i):
objs = self.fetchImgItemsData(url, channel)
print "解析 Img 图片ok----channl=", channel, ' 页数=', i, " 数量=", len(objs)
for obj in objs:
try:
ops.inertImgItems(obj)
for picItem in obj['picList']:
item = {}
item['itemUrl'] = obj['url']
item['picUrl'] = picItem
ops.inertImgItems_item(item)
except Exception as e:
print common.format_exception(e)
return len(objs)
def fetchDataHead(self, url):
try:
soup = self.fetchUrl(baseurlImg+url)
div = soup.first("div", {"class": "container"})
if div != None:
return div.findAll('a')
return []
except Exception as e:
print common.format_exception(e)
def fetchImgItemsData(self, url, channel):
try:
lis = self.fetchDataHead(url)
print url, ";itemsLen=", len(lis)
objs = []
sortType = dateutil.y_m_d()
for item in lis:
obj = {}
obj['name'] = item.first("div",{"class":"float-left"}).text
print obj['name']
obj['url'] = item.get('href')
obj['fileDate'] = item.first("div",{"class":"float-right"}).text
obj['baseurl'] = baseurlImg
obj['channel'] = channel
obj['updateTime'] = datetime.datetime.now()
pics = self.fetchImgs(item.get('href'))
if len(pics) == 0:
print '没有 图片文件--', item, '---', url
continue
obj['picList'] = pics
obj['pics'] = len(pics)
obj['sortType'] = sortType
obj['showType'] = 3
print 'url=', obj['url'], 'filedate=', obj['fileDate'], ' 图片数量=', len(pics)
objs.append(obj)
return objs
except Exception as e:
print common.format_exception(e)
def fetchImgs(self, url):
soup = self.fetchUrl(baseurlImg+url)
picData = soup.first("div", {"class": "imgList"})
picList = picData.findAll("img")
pics = []
for item in picList:
pics.append(item.get('data-original'))
return pics
|
[
"liguoqing19861028@163.com"
] |
liguoqing19861028@163.com
|
54a4f16e087a850dd1d3dd642d82d5f350eae00e
|
de28d64694919a861760fa8db8ff5ff781c9e4e3
|
/wild_card_matching.py
|
653f701bd6d307a858b2dc5d5dc736c7214b34a3
|
[] |
no_license
|
pramitsawant/interview_prep_python
|
2202ac6bd7574316885f6067a8c6ac98fc5e88af
|
7a3dad53b954f874995bafdbfd5677959f8b8de7
|
refs/heads/master
| 2020-09-25T06:34:26.903728
| 2016-12-12T13:59:46
| 2016-12-12T13:59:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
# coding=utf-8
'''
'?' Matches any single character.
'*' Matches any sequence of characters (including the empty sequence).
The matching should cover the entire input string (not partial).
The function prototype should be:
bool isMatch(const char *s, const char *p)
Some examples:
isMatch("aa","a") → false
isMatch("aa","aa") → true
isMatch("aaa","aa") → false
isMatch("aa", "*") → true
isMatch("aa", "a*") → true
isMatch("ab", "?*") → true
isMatch("aab", "c*a*b") → false
'''
def wild_card(word, pattern, word_index, pattern_index):
if len(pattern) == pattern_index:
return len(word) == word_index
if pattern[pattern_index] == '?':
return word_index < len(word) and wild_card(word, pattern, word_index + 1, pattern_index + 1)
elif pattern[pattern_index] == '*':
# '*' matches the empty sequence, or consumes one character and stays on '*'
if wild_card(word, pattern, word_index, pattern_index + 1):
return True
return word_index < len(word) and wild_card(word, pattern, word_index + 1, pattern_index)
elif word_index < len(word) and word[word_index] == pattern[pattern_index]:
return wild_card(word, pattern, word_index + 1, pattern_index + 1)
else:
return False
print wild_card('aaaab', '*?', 0, 0)
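# A few extra spot checks against the docstring examples (hypothetical
# additions, not part of the original file):
print wild_card('aa', 'a', 0, 0) # False
print wild_card('aa', '*', 0, 0) # True
print wild_card('aab', 'c*a*b', 0, 0) # False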
|
[
"yask123@gmail.com"
] |
yask123@gmail.com
|
8f4bb724abec5c2be28133f11fadc5f0306fc94f
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/fv/from.py
|
59ac01b9a307e3cca82f10539aba98acaab784ca
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,246
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class From(Mo):
meta = ClassMeta("cobra.model.fv.From")
meta.isAbstract = True
meta.moClassName = "fvFrom"
meta.rnFormat = ""
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "None"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x0
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.concreteSubClasses.add("cobra.model.nw.RtL3EpDefToPathEp")
meta.concreteSubClasses.add("cobra.model.mgmt.RtRtdMgmtConf")
meta.concreteSubClasses.add("cobra.model.pc.RtAccBndlGrpToAggrIf")
meta.concreteSubClasses.add("cobra.model.fv.RtIpEppAtt")
meta.concreteSubClasses.add("cobra.model.l3ext.RtAddrToIpDef")
meta.concreteSubClasses.add("cobra.model.cons.RtConsRoot")
meta.concreteSubClasses.add("cobra.model.l3.RtIpCktEpIfConn")
meta.concreteSubClasses.add("cobra.model.l2.RtSpanSrcToL2CktEpAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtEncPhysRtdConf")
meta.concreteSubClasses.add("cobra.model.svccopy.RtToCopyDestGrp")
meta.concreteSubClasses.add("cobra.model.dhcp.RtRelayAddrToProv")
meta.concreteSubClasses.add("cobra.model.fv.RtQinqEppAtt")
meta.concreteSubClasses.add("cobra.model.pc.RtFcAccBndlGrpToFcAggrIf")
meta.concreteSubClasses.add("cobra.model.l1.RtBrConf")
meta.concreteSubClasses.add("cobra.model.rtextcom.RtExtCommAtt")
meta.concreteSubClasses.add("cobra.model.qosp.RtDot1pRuleAtt")
meta.concreteSubClasses.add("cobra.model.qosp.RtL3dscpRuleAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtLbIfToLocale")
meta.concreteSubClasses.add("cobra.model.leqpt.RtEpDefToLooseNode")
meta.concreteSubClasses.add("cobra.model.fv.RtIpAddr")
meta.concreteSubClasses.add("cobra.model.l1.RtPhysRtdConf")
meta.concreteSubClasses.add("cobra.model.svcredir.RtHealthGrpAtt")
meta.concreteSubClasses.add("cobra.model.l3.RtEPgDefToL3Dom")
meta.concreteSubClasses.add("cobra.model.nw.RtVsanLabelAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtIoPPhysConf")
meta.concreteSubClasses.add("cobra.model.fv.RtCtxToEpP")
meta.concreteSubClasses.add("cobra.model.fv.RtMacEppAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtToObservedEthIf")
meta.concreteSubClasses.add("cobra.model.pc.RtPcFcAggrBrConf")
meta.concreteSubClasses.add("cobra.model.nw.RtVsanPathAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtVlanEppAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtVxlanEppAtt")
meta.concreteSubClasses.add("cobra.model.nw.RtEpDefToPathEp")
meta.concreteSubClasses.add("cobra.model.ip.RtRtDefIpAddr")
meta.concreteSubClasses.add("cobra.model.l2.RtDomIfConn")
meta.concreteSubClasses.add("cobra.model.pc.RtVpcConf")
meta.concreteSubClasses.add("cobra.model.pc.RtFexBndlGrpToAggrIf")
meta.concreteSubClasses.add("cobra.model.rtregcom.RtRegCommAtt")
meta.concreteSubClasses.add("cobra.model.rtpfx.RtRtNhAtt")
meta.concreteSubClasses.add("cobra.model.nw.RtPathDomAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtExtBD")
meta.concreteSubClasses.add("cobra.model.l2.RtSrvExtIfMap")
meta.concreteSubClasses.add("cobra.model.qosp.RtL3dot1pRuleAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtExtConf")
meta.concreteSubClasses.add("cobra.model.qosp.RtDscpRuleAtt")
meta.concreteSubClasses.add("cobra.model.l1.RtFcBrConf")
meta.concreteSubClasses.add("cobra.model.fv.RtToTunDef")
meta.concreteSubClasses.add("cobra.model.analytics.RtMonitorAtt")
meta.concreteSubClasses.add("cobra.model.fv.RtL3If")
meta.concreteSubClasses.add("cobra.model.fv.RtRouteToIfConn")
meta.concreteSubClasses.add("cobra.model.fv.RtToEpgProt")
meta.concreteSubClasses.add("cobra.model.l1.RtLocaleToObservedEthIf")
meta.concreteSubClasses.add("cobra.model.rtpfx.RtRtDstAtt")
meta.concreteSubClasses.add("cobra.model.l2.RtEPgDefToL2Dom")
meta.concreteSubClasses.add("cobra.model.ip.RtRouteToRouteDef")
meta.concreteSubClasses.add("cobra.model.svcredir.RtToRedirDestGrp")
meta.concreteSubClasses.add("cobra.model.fv.RtMacBaseEppAtt")
meta.concreteSubClasses.add("cobra.model.nw.RtEpDefRefToPathEp")
meta.concreteSubClasses.add("cobra.model.svccopy.RtCopyDestAtt")
meta.concreteSubClasses.add("cobra.model.rtpfx.RtRtSrcAtt")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 101, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
d3920561c0ee2d24d702c2e5d3ce5eb4d555dbd3
|
b94ab99f9c1f8bbb99afd23e1bfcd2332060b4bd
|
/run.py
|
4e3d7e4f1400e503e81bde7fd29bd036b083e6d3
|
[] |
no_license
|
georgecai904/bookshelf
|
e54ccae00d4ee48e91ca1564a425ba4586b52d93
|
0002207dc8ca586ce1127d3ea98bb53102d043df
|
refs/heads/master
| 2021-01-02T22:52:26.046535
| 2017-08-05T15:32:13
| 2017-08-05T15:32:13
| 99,409,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import time
def run_bash_command(command):
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
print(process.communicate())
while True:
run_bash_command("python manage.py runcrons")
time.sleep(60)
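# On Python 3.7+ the same thing can be written with the higher-level API
# (a sketch, not in the original file):
# result = subprocess.run(command.split(), capture_output=True)
# print(result.stdout, result.stderr)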
|
[
"georgemail608@gmail.com"
] |
georgemail608@gmail.com
|
c9ea21dbe986fc835e4e862c46e843454f7a3906
|
1c78b144662c5d0473e53ff5622e6cbf8c593ef9
|
/nba_matchup/yfs.py
|
6f89a3e678a6563409b64ce31519d6b5e36ce0aa
|
[
"MIT"
] |
permissive
|
sharadmv/nba-fantasy-optimize
|
fe1eb45df26a8c23b47de1ff5ad98ada877dfd77
|
81fdfa78fce4ce356220c91a1063a6efcfa4ff02
|
refs/heads/master
| 2022-12-13T10:34:51.756539
| 2021-11-23T05:54:12
| 2021-11-23T05:54:12
| 168,676,401
| 4
| 0
|
MIT
| 2022-12-08T01:44:37
| 2019-02-01T09:48:53
|
Python
|
UTF-8
|
Python
| false
| false
| 774
|
py
|
from yaspin import yaspin
import datetime
from yahoo_oauth import OAuth2
from fantasy_sport import FantasySport
__all__ = ['yfs', 'LEAGUE_KEY', 'CURRENT_WEEK', 'START_DATE']
LEAGUE_KEY = "nba.l.64384"
oauth = OAuth2(None, None, from_file='oauth.json', base_url='https://fantasysports.yahooapis.com/fantasy/v2/')
yfs = FantasySport(oauth, fmt='json')
with yaspin(text="Fetching league data", color='cyan'):
response = yfs.get_leagues([LEAGUE_KEY]).json()['fantasy_content']['leagues']['0']['league'][0]
START_DATE = datetime.datetime.strptime(response['start_date'], "%Y-%m-%d").date()
while START_DATE.weekday() != 0:
START_DATE -= datetime.timedelta(days=1)
diff = datetime.datetime.today().date() - START_DATE
CURRENT_WEEK = response.get('current_week', None)
|
[
"sharad.vikram@gmail.com"
] |
sharad.vikram@gmail.com
|
5c48e7effb0eb65a92e95ec6ab09a44a7f7f028d
|
192dec1ea734fd67a3c3720228826cf754b2da5a
|
/valeo/vr/models.py
|
a1a86f3d91add9b73c1e35fc55c2fc8182a5293b
|
[] |
no_license
|
fafaschiavo/cpi_valeo
|
a4df4e64161e58e44ade276f0b6284abfb5af6d2
|
777ef6173bbc4bf5941098cb2ea3b13fccf490c1
|
refs/heads/master
| 2020-04-06T04:14:59.226013
| 2017-05-02T22:39:00
| 2017-05-02T22:39:00
| 82,980,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class joystick(models.Model):
joystick_data = models.CharField(max_length=200)
angle = models.IntegerField(default=90)
pedal = models.FloatField(default=0)
left_buttons = models.IntegerField(default=0)
right_buttons = models.IntegerField(default=0)
# state = joystick(joystick_data = '', angle = 90, pedal = 0, left_buttons = 0, right_buttons = 0)
# state.save()
# joystick.objects.get(id = 1)
|
[
"fayschiavo@gmail.com"
] |
fayschiavo@gmail.com
|
f6dba0a7e196a8718ed2fb1a7978fd42953ee6e8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_322/ch30_2019_03_26_18_43_32_991323.py
|
901b0a5a75d9547c3b7aa65aa4eecdd7ec4a9796
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
import math
velocidade = int(input("Qual a velocidade?:"))
angulo = int(input("Qual o angulo?:"))
distancia = (((velocidade ** 2) * (math.sin(2 * math.radians( angulo))))) / (9.8)
if distancia >= 98 and distancia <= 102:
print('Acertou!')
if distancia < 98:
print('Muito perto')
if distancia > 102:
print('Muito longe')
|
[
"you@example.com"
] |
you@example.com
|
e8fedba445c7d316d56ebdb5ed0292e721f568e9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/162/61510/submittedfiles/testes.py
|
fdf544d79486196cce5adcbccad32091483213bf
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import numpy as np
def menorlinha(a):
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
if a[i,j]==1:
return(i)
def menorcoluna(a):
for j in range(0,a.shape[1],1):
for i in range(0,a.shape[0],1):
if a[i,j]==1:
return(j)
def maiorlinha(a):
for i in range(a.shape[0]-1,-1,-1):
for j in range(a.shape[1]-1,-1,-1):
if a[i,j]==1:
return(i)
def maiorcoluna(a):
for j in range(a.shape[1]-1,-1,-1):
for i in range(a.shape[0]-1,-1,-1):
if a[i,j]==1:
return(j)
linhas=int(input('linhas: '))
colunas=int(input('colunas: '))
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
for j in range(0,a.shape[1],1):
a[i,j]=int(input('digite a matriz'))
x=menorlinha(a)
y=maiorlinha(a)
z=menorcoluna(a)
w=maiorcoluna(a)
print(a[x:y+1,z:w+1])
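# Worked example (hypothetical): for the 3x3 input
# 0 0 0
# 0 1 0
# 0 1 1
# the helpers give x=1, y=2, z=1, w=2, so a[1:3, 1:3] prints the
# bounding box of the 1-entries: [[1. 0.] [1. 1.]]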
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f97d1a8d1a8a7dc659883a5b5bc249e619f17c03
|
f1c071430a352ef82a4e7b902d6081851e5d569a
|
/neuronunit/capabilities/channel.py
|
87c52e19babe3b1c5dbc25efe5553022cee45991
|
[] |
no_license
|
rgerkin/neuronunit
|
b1e5aeadc03c0be3507b0182ae81c89371c5f899
|
85330f1c4e4206b347d5a5e7792d41536ae71a0a
|
refs/heads/master
| 2021-01-17T05:41:06.109463
| 2015-10-23T22:22:30
| 2015-10-23T22:22:30
| 8,037,496
| 2
| 1
| null | 2015-06-25T15:32:01
| 2013-02-05T20:37:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
"""Channel capabilities"""
import inspect
import sciunit
class NML2_Channel_Runnable(sciunit.Capability):
"""Capability for models that can be run using functions available in pyNeuroML.analsysi.NML2ChannelAnalysis"""
def NML2_channel_run(self,**run_params):
return NotImplementedError("%s not implemented" % inspect.stack()[0][3])
class ProducesIVCurve(sciunit.Capability):
"""The capability to produce a current-voltage plot for a set of voltage steps"""
def produce_iv_curve(self, **run_params):
"""Produces steady-state and peak IV curve at voltages and conditions given according to 'run_params'"""
return NotImplementedError("%s not implemented" % inspect.stack()[0][3])
def produce_iv_curve_ss(self,**run_params):
"""Produces steady-state IV curve at voltages and conditions given according to 'run_params'"""
return NotImplementedError("%s not implemented" % inspect.stack()[0][3])
def produce_iv_curve_peak(self,*run_params):
"""Produces peak current IV curve at voltages and conditions given according to 'run_params'"""
return NotImplementedError("%s not implemented" % inspect.stack()[0][3])
def plot_iv_curve(self,iv_data):
"""Plots IV Curve using results from 'iv_data'"""
return NotImplementedError("%s not implemented" % inspect.stack()[0][3])
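# Minimal sketch of a model advertising this capability (hypothetical class,
# not part of this module):
# class MyChannelModel(sciunit.Model, ProducesIVCurve):
#     def produce_iv_curve(self, **run_params):
#         ...  # run the voltage steps and return the IV data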
|
[
"rgerkin@asu.edu"
] |
rgerkin@asu.edu
|
e7391ef71192ed06e3c4ff224131362673034076
|
2b4badbedab24ed4376ab65818d0e59af6539144
|
/messcode/mail_first.py
|
5e2d1b63c30dd456564b90b3732846639eeebee1
|
[] |
no_license
|
matthewangbin/Python
|
878d8180d12d235f8d238574414bb41edad5ceee
|
c9a94b4203380a06364da1f7466aafc4b141d951
|
refs/heads/master
| 2021-10-11T01:45:22.890144
| 2019-01-21T02:44:25
| 2019-01-21T02:44:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2017/12/25 21:55
# @Author : Matthew
# @Site :
# @File : mail_first.py
# @Software: PyCharm
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr(( \
Header(name, 'utf-8').encode(), \
addr.encode('utf-8') if isinstance(addr, unicode) else addr))
from_addr = 'wb2847@163.com'
password = 'wb284745'
to_addr = '404047816@qq.com'
smtp_server = 'smtp.163.com'
msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
msg['From'] = _format_addr(u'Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr(u'管理员 <%s>' % to_addr)
msg['Subject'] = Header(u'来自SMTP的问候……', 'utf-8').encode()
#msg.attach(MIMEText('send with files','plain','utf-8'))
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
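# Note (an assumption, not in the original script): if plain SMTP on port 25
# is blocked, the same flow usually works over implicit TLS:
# server = smtplib.SMTP_SSL(smtp_server, 465)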
|
[
"wb2847@163.com"
] |
wb2847@163.com
|
87cc4210ce4c5b742377c17cba6924f894a19b86
|
c1ba3127b3526aba8b9bf25fddd172020a8858a8
|
/easy/array/max_product_of_3_numbers/max_product_of_3_numbers.py
|
92170a0b39e7a4c064842c5fbae87866cdf17d9c
|
[
"MIT"
] |
permissive
|
deepshig/leetcode-solutions
|
f5c627215e79323dba3bb6d4005e35e33f31c858
|
1e99e0852b8329bf699eb149e7dfe312f82144bc
|
refs/heads/master
| 2022-11-30T20:50:02.007164
| 2020-08-06T19:21:02
| 2020-08-06T19:21:02
| 279,260,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first_min, second_min = float("inf"), float("inf")
first_max, second_max, third_max = - \
float("inf"), -float("inf"), -float("inf")
for n in nums:
if n <= first_min:
second_min, first_min = first_min, n
elif n <= second_min:
second_min = n
if n >= first_max:
third_max, second_max, first_max = second_max, first_max, n
elif n >= second_max:
third_max, second_max = second_max, n
elif n >= third_max:
third_max = n
product_1 = first_min*second_min*first_max
product_2 = first_max*second_max*third_max
return max(product_1, product_2)
s = Solution()
print("Solution 1 : ", s.maximumProduct([1, 2, 3]))
print("Solution 2 : ", s.maximumProduct([1, 2, 3, 4]))
print("Solution 3 : ", s.maximumProduct([-4, -3, -2, -1, 60]))
|
[
"deepshi1104@gmail.com"
] |
deepshi1104@gmail.com
|
fdf0aba38c57fae8c11e4e5cdb741c8e4a0951be
|
743ad4ed116b838da917f105909d9e84e10a4b31
|
/day06/ResponseAndRequest/ResponseAndRequest/spiders/myspider.py
|
7568ce3d3f4c4f39a6c46469c61ae47cf2fc81a5
|
[] |
no_license
|
heiyouyou/Scrapy
|
62bb90638a8d6ee1aa62dcf525463c6b0a6a46e6
|
afa74f885d30ae3486b1da52dc90d0b7118f4dc1
|
refs/heads/master
| 2021-09-07T12:44:11.063627
| 2018-02-23T03:05:39
| 2018-02-23T03:05:39
| 113,541,279
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['http://example.com']
def parse(self,response):
print('Existing settings: %s' % self.settings.attributes.keys())
|
[
"1337238043@qq.com"
] |
1337238043@qq.com
|
6f0728d8e2f5aeeb689a2bb3c96ffab2ed3f3d84
|
3d154d9b3fe7487356d155c23d2b3541dacae1c1
|
/dao/userhelper.py
|
dc1d343b7a1cb51c8d4dcdd22c7e3441947cccc7
|
[] |
no_license
|
igortereshchenko/python_oracle_orm_service
|
ef847fff7d0762813edf64f54235f471cdccd62f
|
d824fa0f01b2fdcc92b053ea942bb382266a0b43
|
refs/heads/master
| 2020-05-23T12:06:54.729982
| 2019-05-16T19:12:05
| 2019-05-16T19:12:05
| 186,751,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
from dao.db import OracleDb
import cx_Oracle
class UserHelper:
def __init__(self):
self.db = OracleDb()
def getSkillData(self, skill_name=None):
if skill_name:
skill_name="'{0}'".format(skill_name)
else:
skill_name='null'
query = "select * from table(user_skillS.GetSkillData({0}))".format(skill_name)
result = self.db.execute(query)
return result.fetchall()
def getUserId(self, user_email, user_password):
user_id = self.db.cursor.callfunc("USER_AUTH.GET_USER_ID", cx_Oracle.NATIVE_INT, [user_email, user_password])
return user_id
def newUser(self, USER_STUDYBOOK, USER_YEAR, USER_NAME, USER_EMAIL, USER_BIRTHDAY, USER_PASSWORD):
cursor = self.db.cursor
user_id = cursor.var(cx_Oracle.NATIVE_INT)
status = cursor.var(cx_Oracle.STRING)
cursor.callproc("USER_AUTH.NEW_USER", [user_id, status, USER_STUDYBOOK, USER_YEAR, USER_NAME, USER_EMAIL, USER_BIRTHDAY.upper(), USER_PASSWORD])
return user_id.getvalue(), status.getvalue()
def getUsers(self):
return self.db.execute('SELECT * FROM "user"').fetchall()
if __name__ == "__main__":
helper = UserHelper()
print(helper.getSkillData('Java'))
print(helper.getSkillData())
print(helper.getUserId('PETRO@GMAIL.COM','222'))
print(helper.newUser('KM5555', '10-OCT-17', 'Kate', 'KATE@GMAIL.COM', '21-OCT-97','555'))
print(helper.getUsers())
|
[
"tereshchenko.igor@gmail.com"
] |
tereshchenko.igor@gmail.com
|
cc7e03607b31f4438fef4e654117ffd8353d2dc4
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/minimum-initial-energy-to-finish-tasks/422867377.py
|
4ecf5f68fe76384049941bdc723c184406bd6d20
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601
| 2021-04-06T11:30:21
| 2021-04-06T11:30:21
| 201,942,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
# title: minimum-initial-energy-to-finish-tasks
# detail: https://leetcode.com/submissions/detail/422867377/
# datetime: Sun Nov 22 14:20:04 2020
# runtime: 1260 ms
# memory: 59.4 MB
from typing import List
class Solution:
def minimumEffort(self, tasks: List[List[int]]) -> int:
'''
Let T be the optimal ordering of the tasks, E the initial energy, and
L the energy left over at the end; the problem is to pick T and L so
that E is minimized.
Working backwards from the last task:
step n:     E[n] = L
step n - 1: E[n - 1] = max(E[n] + T[n - 1][0], T[n - 1][1])
step n - 2: E[n - 2] = max(E[n - 1] + T[n - 2][0], T[n - 2][1])
...
step 0:     E[0] = max(E[1] + T[0][0], T[0][1])
Recurrence: E[k] = max(E[k + 1] + T[k][0], T[k][1])
With a single task, E[0] = T[0][1] and E[1] = T[0][1] - T[0][0].
'''
tasks.sort(key=lambda p: p[1] - p[0])
e = 0
for a, m in tasks:
e = max(e + a, m)
return e
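# Worked example (hypothetical check, mirrors LeetCode's example 1):
# tasks = [[1, 2], [2, 4], [4, 8]] already sorts by m - a (diffs 1, 2, 4),
# and e evolves 0 -> max(0+1, 2) = 2 -> max(2+2, 4) = 4 -> max(4+4, 8) = 8.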
|
[
"ljm51689@gmail.com"
] |
ljm51689@gmail.com
|
4085500854bc565a03cb7ed04cbd39c6bb4c3dca
|
b7255be7fc09f7fd8178b820c6ff3c69d7e4d750
|
/flask-api/python_go/pythonjs/runtime/go_builtins.py
|
52c0758a59fb24623525b8c7a61e7f2eaaca7eb8
|
[] |
no_license
|
divyajyotiuk/go-asn-playground
|
e65bcd5474674005fb64567ec205b3b5f757e438
|
ee7fd0c57e86f84e045bbc888fb3c4f265bdb969
|
refs/heads/master
| 2021-02-05T02:09:02.338834
| 2020-04-13T10:40:31
| 2020-04-13T10:40:31
| 243,732,017
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
# PythonJS Go builtins
# by Brett Hartshorn - copyright 2014
# License: "New BSD"
import strconv
inline("""
type __object__ struct {
__class__ string
}
type object interface{
getclassname() string
}
func (self __object__) getclassname() string {
return self.__class__
}
func ord(x string) int {
r := []rune(x)
return int(r[0])
}
func __test_if_true__(v interface{}) bool {
switch v.(type) {
case nil:
return false
case int:
i,_ := v.(int)
return i != 0
case float64:
f,_ := v.(float64)
return f != 0.0
case bool:
b,_ := v.(bool)
return b
case string:
s,_ := v.(string)
return s != ""
default:
return false
}
}
func str(v interface{}) string {
switch v.(type) {
case nil:
return "None"
case int:
i,_ := v.(int)
return strconv.Itoa(i)
case float64:
f,_ := v.(float64)
return strconv.FormatFloat(f, 'g', -1, 64) // close to CPython's str(float)
case bool:
b,_ := v.(bool)
if b { return "True"
} else { return "False" }
case string:
s,_ := v.(string)
return s
default:
return "TODO unknown type"
}
}
func range1( x int ) *[]int {
arr := make([]int, x)
for i := 0; i < x; i++ {
arr[i]=i
}
return &arr
}
func range2( start int, stop int ) *[]int {
arr := make([]int, stop-start)
for i := start; i < stop; i++ {
arr[i-start]=i
}
return &arr
}
func range3( start int, stop int, step int ) *[]int {
arr := make([]int, (stop-start+step-1)/step)
for i := start; i < stop; i+=step {
arr[(i-start)/step]=i
}
return &arr
}
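// Quick sanity examples (hypothetical, assuming the index fixes above):
// *range1(3)       == []int{0, 1, 2}
// *range2(2, 5)    == []int{2, 3, 4}
// *range3(0, 7, 3) == []int{0, 3, 6}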
""")
|
[
"you@example.com"
] |
you@example.com
|
bd866540d8720bd5ec59b2b3bc0c4f34b4c1c817
|
4cf14ded3e404a9801f7fc1103d7a72019fecd0c
|
/alembic/unused_versions/b70252e34014_use_sqlitecompat_module.py
|
f581905adf8096ee4040a7550b77a921cd531c63
|
[] |
no_license
|
elthran/RPG-Game
|
8315aac6b0b162e9233a901d5af5c018ca4bf9d1
|
6168d7938c72a5a0bb36ca40b96a2a7232021cb5
|
refs/heads/master
| 2018-09-23T14:51:27.111954
| 2018-06-12T18:28:02
| 2018-06-12T18:28:02
| 64,792,548
| 0
| 0
| null | 2018-06-12T18:32:20
| 2016-08-02T21:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 992
|
py
|
"""Use SQLiteCompat module to drop a column.
Also first revision, yay!
IMPORTANT!!
Fancy method to drop a column when using SQLite.
Yes, it it super long and stupidly complex.
All it does is replicate:
op.drop_column('forum', 'title')
Revision ID: b70252e34014
Revises:
Create Date: 2018-01-31 20:48:18.530044
"""
from alembic import op
import sqlalchemy as sa
import sys
import os
# Get the name of the current directory for this file and split it.
old_path = os.path.dirname(os.path.abspath(__file__)).split(os.sep)
new_path = os.sep.join(old_path[:-1])
# -1 refers to how many levels of directory to go up
sys.path.insert(0, new_path)
from sqlite_compat import SQLiteCompat
sys.path.pop(0)
# revision identifiers, used by Alembic.
revision = 'b70252e34014'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.add_column('forum', sa.Column('title', sa.String))
def downgrade():
compat = SQLiteCompat()
compat.drop_column('forum', 'title')
|
[
"klondikemarlen@gmail.com"
] |
klondikemarlen@gmail.com
|
567b57a6d4840a4a37fbb66684b700fe4a6dd28f
|
74983098c5de53007bde6052a631845c781b5ba8
|
/hartmann6/hartmann6_54/hartmann6.py
|
bbbeab0123de21bfab6548b67e88e58a1eaed978
|
[] |
no_license
|
numairmansur/Experiments
|
94ccdd60f4c2cf538fab41556ac72405656c9d77
|
592f39916461c7a9f7d400fa26f849043d1377ed
|
refs/heads/master
| 2021-04-29T12:39:16.845074
| 2017-02-15T07:36:47
| 2017-02-15T07:36:47
| 78,043,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
import numpy as np
import sys
import math
import time
import csv
from hpolib.benchmarks.synthetic_functions import Hartmann6
from time import gmtime, strftime
def main(job_id, params):
print '!!! Entered Main !!!'
print 'Anything printed here will end up in the output directory for job #:', str(job_id)
print params
f = Hartmann6()
res = f.objective_function([params['x'], params['y'], params['z'], params['xx'], params['yy'], params['zz']])
print res
with open('/home/mansurm/Experiments/hartmann6/run54.csv','a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow([res['main'][0]])
return res['main'][0]
|
[
"numair.mansur@gmail.com"
] |
numair.mansur@gmail.com
|
1bf849a5f322986e8eb6180f2477adc70b8f1651
|
63b0fed007d152fe5e96640b844081c07ca20a11
|
/yukicoder/MMA Contest 016/f.py
|
8898f332475726cac9f98f6195e8c1c880d65675
|
[] |
no_license
|
Nikkuniku/AtcoderProgramming
|
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
|
fbaf7b40084c52e35c803b6b03346f2a06fb5367
|
refs/heads/master
| 2023-08-21T10:20:43.520468
| 2023-08-12T09:53:07
| 2023-08-12T09:53:07
| 254,373,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
S = input()
N = len(S)
alp = [[] for _ in range(26)]
ans = 0
for i, v in enumerate(S):
alp[ord(v)-65].append(i)
for i in range(26):
for j in range(1, len(alp[i])):
idx = alp[i][j]
tmp = j*(N-idx-1-(len(alp[i])-j-1))
ans += tmp
print(ans)
|
[
"ymdysk911@gmail.com"
] |
ymdysk911@gmail.com
|
dad7e56953388c605ffb3cee21d10c587cc1f059
|
3002ce1c3a5628386fe747c8f1733b2f482780b9
|
/tests/densities/test_posterior_gp_classification_ard.py
|
ce104a3bdc897f1397b9c19df1fb5cae0676e0e7
|
[] |
no_license
|
exord/kernel_hmc
|
eb74f58d4c4a5a121f7383ba11dabfc1f1c95220
|
f74e9eafe5637c4c95af1823d629140ca3b4b909
|
refs/heads/master
| 2020-03-19T10:28:05.767470
| 2018-06-06T19:01:04
| 2018-06-06T19:01:04
| 136,373,212
| 0
| 0
| null | 2018-06-06T18:54:42
| 2018-06-06T18:54:42
| null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
# depends on shogun, which might not be available
from nose import SkipTest
import numpy as np
try:
from kernel_hmc.densities.posterior_gp_classification_ard import GlassPosterior
glass_available = True
except ImportError:
glass_available = False
def test_glass_posterior_setup_execute():
if not glass_available:
raise SkipTest("Shogun not available")
GlassPosterior().set_up()
def test_glass_posterior_log_pdf_execute():
if not glass_available:
raise SkipTest("Shogun not available")
D = 9
theta = np.random.randn(D)
target = GlassPosterior()
target.set_up()
target.log_pdf(theta)
|
[
"heiko.strathmann@gmail.com"
] |
heiko.strathmann@gmail.com
|