| Column | Type | Lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
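The records below follow this schema, one per source file. For programmatic access, a minimal sketch using the Hugging Face `datasets` library; the dataset ID `user/stack-slice` is a placeholder, since the dump does not name its source dataset.

```python
# Minimal sketch, assuming the `datasets` library is installed and the dump
# comes from a Hub dataset; "user/stack-slice" is a placeholder ID.
from datasets import load_dataset

ds = load_dataset("user/stack-slice", split="train", streaming=True)
for row in ds:
    # Each row pairs provenance metadata with the raw file text in `content`.
    print(row["repo_name"], row["path"], row["length_bytes"])
    break
```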
blob_id: d15740ee02928888baf46ea11ad875d8f4194fcc | directory_id: da0a7446122a44887fa2c4f391e9630ae033daa2 | content_id: 8ccc79b096a47fcfdea805fb3ec786775f9cd30b
path: /python/ray/train/tests/test_torch_trainer.py
detected_licenses: ["MIT", "BSD-3-Clause", "Apache-2.0"] | license_type: permissive
repo_name: whiledoing/ray | snapshot_id: d8d9ba09b7545e8fd00cca5cfad451278e61fffd | revision_id: 9272bcbbcae1630c5bb2db08a8279f0401ce6f92 | branch_name: refs/heads/master
visit_date: 2023-03-06T16:23:18.006757 | revision_date: 2022-07-22T02:06:47 | committer_date: 2022-07-22T02:06:47
github_id: 252,420,044 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-03-04T08:57:20 | gha_created_at: 2020-04-02T10:07:23 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,493 | extension: py
content:
import pytest
from ray.air import session
from ray.air.checkpoint import Checkpoint
import torch
import ray
from ray.air.examples.pytorch.torch_linear_example import (
train_func as linear_train_func,
)
from ray.train.torch import TorchPredictor, TorchTrainer
from ray.tune import TuneError
from ray.air.config import ScalingConfig
@pytest.fixture
def ray_start_4_cpus():
address_info = ray.init(num_cpus=4)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.mark.parametrize("num_workers", [1, 2])
def test_torch_linear(ray_start_4_cpus, num_workers):
def train_func(config):
result = linear_train_func(config)
assert len(result) == epochs
assert result[-1]["loss"] < result[0]["loss"]
num_workers = num_workers
epochs = 3
scaling_config = ScalingConfig(num_workers=num_workers)
config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": epochs}
trainer = TorchTrainer(
train_loop_per_worker=train_func,
train_loop_config=config,
scaling_config=scaling_config,
)
trainer.fit()
def test_torch_e2e(ray_start_4_cpus):
def train_func():
model = torch.nn.Linear(1, 1)
session.report({}, checkpoint=Checkpoint.from_dict(dict(model=model)))
scaling_config = ScalingConfig(num_workers=2)
trainer = TorchTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
predict_dataset = ray.data.range(3)
class TorchScorer:
def __init__(self):
self.pred = TorchPredictor.from_checkpoint(result.checkpoint)
def __call__(self, x):
return self.pred.predict(x, dtype=torch.float)
predictions = predict_dataset.map_batches(
TorchScorer, batch_format="pandas", compute="actors"
)
assert predictions.count() == 3
def test_torch_e2e_state_dict(ray_start_4_cpus):
def train_func():
model = torch.nn.Linear(1, 1).state_dict()
session.report({}, checkpoint=Checkpoint.from_dict(dict(model=model)))
scaling_config = ScalingConfig(num_workers=2)
trainer = TorchTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
# If loading from a state dict, a model definition must be passed in.
with pytest.raises(ValueError):
TorchPredictor.from_checkpoint(result.checkpoint)
class TorchScorer:
def __init__(self):
self.pred = TorchPredictor.from_checkpoint(
result.checkpoint, model=torch.nn.Linear(1, 1)
)
def __call__(self, x):
return self.pred.predict(x, dtype=torch.float)
predict_dataset = ray.data.range(3)
predictions = predict_dataset.map_batches(
TorchScorer, batch_format="pandas", compute="actors"
)
assert predictions.count() == 3
def test_checkpoint_freq(ray_start_4_cpus):
# checkpoint_freq is not supported so raise an error
trainer = TorchTrainer(
train_loop_per_worker=lambda config: None,
scaling_config=ray.air.ScalingConfig(num_workers=1),
run_config=ray.air.RunConfig(
checkpoint_config=ray.air.CheckpointConfig(
checkpoint_frequency=2,
),
),
)
with pytest.raises(TuneError):
trainer.fit()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
authors: ["noreply@github.com"] | author_id: whiledoing.noreply@github.com

blob_id: 174dff48ac9b3bf40f36add33d6c4d489b16d1c6 | directory_id: 267867b5bcaeeed26228a295b877a62473e1d1bb | content_id: f4a4d313384dccede10d4ca0c3367a4ffb51601e
path: /scripts/python/client.py
detected_licenses: [] | license_type: no_license
repo_name: gitgaoqian/cloud_v2 | snapshot_id: 602a2a3ea8e4457606ce9f1a55e2166c9b227781 | revision_id: b35d89132799042ac34eba435b76c311c2a4b287 | branch_name: refs/heads/master
visit_date: 2021-05-06T07:08:52.055547 | revision_date: 2021-01-24T12:25:38 | committer_date: 2021-01-24T12:25:38
github_id: 113,935,952 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 546 | extension: py
content:
#!/usr/bin/env python
import sys
import rospy
from cloud_v2.srv import call
def local_client(service,action):
rospy.wait_for_service('bridge_service')
try:
client = rospy.ServiceProxy('bridge_service', call)
resp1 = client(service,action)
return resp1
except rospy.ServiceException, e:
print "Service call failed: %s"%e
if __name__ == "__main__":
service = str(sys.argv[1])
action = str(sys.argv[2])
print "request "+service+' '+action
print " %s"%(local_client(service,action))
authors: ["734756851@qq.com"] | author_id: 734756851@qq.com

blob_id: 0cce847454191ea31bb62590d59b1466784e8cc7 | directory_id: ef8c5c55b6ec3971adff9afe2db1f76556b87082 | content_id: 8ecd2c9c6ad280e4676bd5d9c627e86e1cdfa832
path: /code_examples.bak/wave2d_numpy_f90_cuda/wave2d_2plot.py
detected_licenses: [] | license_type: no_license
repo_name: wbkifun/my_stuff | snapshot_id: 7007efc94b678234097abf0df9babfbd79dcf0ff | revision_id: 0b5ad5d4d103fd05989b514bca0d5114691f8ff7 | branch_name: refs/heads/master
visit_date: 2020-12-10T22:40:28.532993 | revision_date: 2017-11-15T11:39:41 | committer_date: 2017-11-15T11:39:41
github_id: 5,178,225 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,863 | extension: py
content:
#------------------------------------------------------------------------------
# Author : Ki-Hwan Kim (kh.kim@kiaps.org)
#
# Written date : 2010. 6. 17
# Modify date : 2012. 9. 17
#
# Copyright : GNU GPL
#
# Description :
# Solve the 2-D wave equation with the FD(Finite-Difference) scheme
#
# These are educational codes to study the scientific python programming.
# Step 1: Using the numpy
# Step 2: Convert the hotspot to the Fortran code using F2PY
# Step 3: Convert the hotspot to the CUDA code using PyCUDA
#------------------------------------------------------------------------------
from __future__ import division
import numpy
import matplotlib.pyplot as plt
from time import time
#from core_numpy import advance
from core_fortran import advance
# Setup
nx, ny = 2400, 2000
tmax, tgap = 1500, 40
c = numpy.ones((nx,ny), order='F')*0.25
f = numpy.zeros_like(c, order='F')
g = numpy.zeros_like(c, order='F')
# Plot using the matplotlib
plt.ion()
fig = plt.figure(figsize=(8,10))
ax1 = fig.add_subplot(2,1,1)
ax1.plot([nx//2,nx//2], [0,ny], '--k')
imag = ax1.imshow(c.T, origin='lower', vmin=-0.1, vmax=0.1)
fig.colorbar(imag)
ax2 = fig.add_subplot(2,1,2)
line, = ax2.plot(c[nx//2,:])
ax2.set_xlim(0, ny)
ax2.set_ylim(-0.1, 0.1)
# Main loop for the time evolution
t0 = time()
f_avg = numpy.zeros(ny)
for tn in xrange(1,tmax+1):
#g[nx//3,ny//2] += numpy.sin(0.05*numpy.pi*tn)
g[nx//3,ny//2+100] += numpy.sin(0.05*numpy.pi*tn)
g[nx//3,ny//2-100] += numpy.sin(0.05*numpy.pi*tn)
advance(c, f, g)
advance(c, g, f)
f_avg[:] += f[nx//2,:]**2
if tn%tgap == 0:
print "%d (%d %%)" % (tn, tn/tmax*100)
imag.set_array(f.T)
line.set_ydata(f_avg)
f_avg[:] = 0
plt.draw()
#plt.savefig('./png/%.5d.png' % tn)
print "throughput: %1.3f Mcell/s" % (nx*ny*tmax/(time()-t0)/1e6)
authors: ["kh.kim@kiaps.org"] | author_id: kh.kim@kiaps.org

blob_id: ea43c9fb9d97749418f21a7acb65a4abd48d203b | directory_id: 786027545626c24486753351d6e19093b261cd7d | content_id: 14fadb23d185b60636313cbd85e5c0fceeb233fc
path: /ghidra9.2.1_pyi/ghidra/file/formats/ios/decmpfs/DecmpfsHeader.pyi
detected_licenses: ["MIT"] | license_type: permissive
repo_name: kohnakagawa/ghidra_scripts | snapshot_id: 51cede1874ef2b1fed901b802316449b4bf25661 | revision_id: 5afed1234a7266c0624ec445133280993077c376 | branch_name: refs/heads/main
visit_date: 2023-03-25T08:25:16.842142 | revision_date: 2021-03-18T13:31:40 | committer_date: 2021-03-18T13:31:40
github_id: 338,577,905 | star_events_count: 14 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,782 | extension: pyi
content:
from typing import List
import ghidra.app.util.bin
import ghidra.program.model.data
import java.lang
class DecmpfsHeader(object, ghidra.app.util.bin.StructConverter):
ASCII: ghidra.program.model.data.DataType = char
BYTE: ghidra.program.model.data.DataType = byte
DWORD: ghidra.program.model.data.DataType = dword
IBO32: ghidra.program.model.data.DataType = ImageBaseOffset32
POINTER: ghidra.program.model.data.DataType = pointer
QWORD: ghidra.program.model.data.DataType = qword
STRING: ghidra.program.model.data.DataType = string
UTF16: ghidra.program.model.data.DataType = unicode
UTF8: ghidra.program.model.data.DataType = string-utf8
VOID: ghidra.program.model.data.DataType = void
WORD: ghidra.program.model.data.DataType = word
def __init__(self, __a0: ghidra.app.util.bin.BinaryReader, __a1: int): ...
def equals(self, __a0: object) -> bool: ...
def getAttrBytes(self) -> List[int]: ...
def getClass(self) -> java.lang.Class: ...
def getCompressionMagic(self) -> unicode: ...
def getCompressionType(self) -> int: ...
def getUncompressedSize(self) -> long: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toDataType(self) -> ghidra.program.model.data.DataType: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def attrBytes(self) -> List[int]: ...
@property
def compressionMagic(self) -> unicode: ...
@property
def compressionType(self) -> int: ...
@property
def uncompressedSize(self) -> long: ...
authors: ["tsunekou1019@gmail.com"] | author_id: tsunekou1019@gmail.com

blob_id: fdfbe937a2a646632ade4d132625cc393a0fe83c | directory_id: 90cd41da01e181bf689feb6d305a2610c88e3902 | content_id: 5e9aab7633647c77ac10cac70360dffb82e45287
path: /senlin/tests/tempest/api/clusters/test_cluster_delete_negative.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: paperandsoap/senlin | snapshot_id: 368980e1fb01d91659f8b0d7dd532c3260386fa7 | revision_id: 5d98dae3911aa4d5b71e491f3a4e0c21371cc75a | branch_name: refs/heads/master
visit_date: 2020-12-25T17:45:02.490958 | revision_date: 2016-05-29T14:42:39 | committer_date: 2016-05-29T14:42:39
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,100 | extension: py
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.api import utils
class TestClusterDeleteNegative(base.BaseSenlinTest):
def setUp(self):
super(TestClusterDeleteNegative, self).setUp()
profile_id = utils.create_a_profile(self)
self.addCleanup(utils.delete_a_profile, self, profile_id)
self.cluster_id = utils.create_a_cluster(self, profile_id)
self.addCleanup(utils.delete_a_cluster, self, self.cluster_id)
policy_id = utils.create_a_policy(self)
self.addCleanup(utils.delete_a_policy, self, policy_id)
utils.attach_policy(self, self.cluster_id, policy_id)
self.addCleanup(utils.detach_policy, self, self.cluster_id, policy_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('0de81427-2b2f-4821-9462-c893d35fb212')
def test_cluster_delete_conflict(self):
# Verify conflict exception(409) is raised.
self.assertRaises(exceptions.Conflict,
self.client.delete_obj,
'clusters', self.cluster_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('8a583b8e-eeaa-4920-a6f5-2880b070624f')
def test_cluster_delete_not_found(self):
# Verify notfound exception(404) is raised.
self.assertRaises(exceptions.NotFound,
self.client.delete_obj,
'clusters', '8a583b8e-eeaa-4920-a6f5-2880b070624f')
authors: ["tengqim@cn.ibm.com"] | author_id: tengqim@cn.ibm.com

blob_id: 84c5278933ccbfe033e99a67fd923dea7a9c80d2 | directory_id: 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | content_id: 6a7ba7022f222c97d748b69be9c9ec7f90231268
path: /res_bw/scripts/common/lib/encodings/utf_32.py
detected_licenses: [] | license_type: no_license
repo_name: webiumsk/WOT-0.9.12-CT | snapshot_id: e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | revision_id: 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | branch_name: refs/heads/master
visit_date: 2021-01-10T01:38:38.080814 | revision_date: 2015-11-11T00:08:04 | committer_date: 2015-11-11T00:08:04
github_id: 45,803,240 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: WINDOWS-1250 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,805 | extension: py
content:
# 2015.11.10 21:35:58 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/encodings/utf_32.py
"""
Python 'utf-32' Codec
"""
import codecs, sys
encode = codecs.utf_32_encode
def decode(input, errors = 'strict'):
return codecs.utf_32_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors = 'strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
return
def encode(self, input, final = False):
if self.encoder is None:
result = codecs.utf_32_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
else:
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
return
def getstate(self):
if self.encoder is None:
return 2
else:
return 0
def setstate(self, state):
if state:
self.encoder = None
elif sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors = 'strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
return
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
output, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_32_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError('UTF-32 stream does not start with BOM')
return (output, consumed)
else:
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
return
def getstate(self):
state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
if self.decoder is None:
return (state, 2)
else:
addstate = int((sys.byteorder == 'big') != (self.decoder is codecs.utf_32_be_decode))
return (state, addstate)
def setstate(self, state):
codecs.BufferedIncrementalDecoder.setstate(self, state)
state = state[1]
if state == 0:
self.decoder = codecs.utf_32_be_decode if sys.byteorder == 'big' else codecs.utf_32_le_decode
elif state == 1:
self.decoder = codecs.utf_32_le_decode if sys.byteorder == 'big' else codecs.utf_32_be_decode
else:
self.decoder = None
return
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors = 'strict'):
self.encoder = None
codecs.StreamWriter.__init__(self, stream, errors)
return
def reset(self):
codecs.StreamWriter.reset(self)
self.encoder = None
return
def encode(self, input, errors = 'strict'):
if self.encoder is None:
result = codecs.utf_32_encode(input, errors)
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
else:
return self.encoder(input, errors)
return
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors = 'strict'):
object, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_32_le_decode
elif byteorder == 1:
self.decode = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError, 'UTF-32 stream does not start with BOM'
return (object, consumed)
def getregentry():
return codecs.CodecInfo(name='utf-32', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\utf_32.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:35:58 Central Europe (standard time)
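This decompiled module mirrors CPython's stdlib `encodings/utf_32.py`. A quick round-trip with the stdlib codec shows the BOM handshake the incremental classes implement: encoding prepends a 4-byte byte-order mark, and decoding consumes it to pick the LE or BE decoder.

```python
data = "hello".encode("utf-32")   # BOM + code units in native byte order
print(data[:4].hex())             # the byte-order mark (fffe0000 on little-endian)
print(data.decode("utf-32"))      # 'hello'
```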
authors: ["info@webium.sk"] | author_id: info@webium.sk

blob_id: 856f194b3643fa63d846f488a7b5eaa655451e14 | directory_id: e00186e71a1f52b394315a0cbc27162254cfffb9 | content_id: 9033cff3915786e32a148f3a2d204f1ea519d37c
path: /durga/full_durga/withrestc1/withrestc1/asgi.py
detected_licenses: [] | license_type: no_license
repo_name: anilkumar0470/git_practice | snapshot_id: cf132eb7970c40d0d032520d43e6d4a1aca90742 | revision_id: 588e7f654f158e974f9893e5018d3367a0d88eeb | branch_name: refs/heads/master
visit_date: 2023-04-27T04:50:14.688534 | revision_date: 2023-04-22T05:54:21 | committer_date: 2023-04-22T05:54:21
github_id: 100,364,712 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2021-12-08T19:44:58 | gha_created_at: 2017-08-15T10:02:33 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 397 | extension: py
content:
"""
ASGI config for withrestc1 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'withrestc1.settings')
application = get_asgi_application()
authors: ["anilkumar.0466@gmail.com"] | author_id: anilkumar.0466@gmail.com

blob_id: d76a7713552cf333afc6141ab7be1d25632b568f | directory_id: a777170c979214015df511999f5f08fc2e0533d8 | content_id: 4b5a99b0e21fb352e87ffcf6d20182905ddebe0e
path: /claf/data/reader/bert/glue/qnli.py
detected_licenses: ["MIT", "Apache-2.0", "BSD-3-Clause"] | license_type: permissive
repo_name: srlee-ai/claf | snapshot_id: 210b2d51918cf210683e7489ccb8347cb8b1f146 | revision_id: 89b3e5c5ec0486886876ea3bac381508c6a6bf58 | branch_name: refs/heads/master
visit_date: 2021-02-13T04:38:36.198288 | revision_date: 2020-03-03T15:01:01 | committer_date: 2020-03-03T15:01:01
github_id: 244,661,892 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2020-03-03T14:45:52 | gha_created_at: 2020-03-03T14:45:52 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,660 | extension: py
content:
import logging
from overrides import overrides
from claf.data.reader import SeqClsBertReader
from claf.decorator import register
logger = logging.getLogger(__name__)
@register("reader:qnli_bert")
class QNLIBertReader(SeqClsBertReader):
"""
QNLI DataReader for BERT
* Args:
file_paths: .tsv file paths (train and dev)
tokenizers: defined tokenizers config
"""
CLASS_DATA = ["entailment", "not_entailment"]
METRIC_KEY = "accuracy"
def __init__(
self,
file_paths,
tokenizers,
sequence_max_length=None,
cls_token="[CLS]",
sep_token="[SEP]",
input_type="bert",
is_test=False,
):
super(QNLIBertReader, self).__init__(
file_paths,
tokenizers,
sequence_max_length,
class_key=None,
cls_token=cls_token,
sep_token=sep_token,
input_type=input_type,
is_test=is_test,
)
@overrides
def _get_data(self, file_path, **kwargs):
data_type = kwargs["data_type"]
_file = self.data_handler.read(file_path)
lines = _file.split("\n")
data = []
for i, line in enumerate(lines):
if i == 0:
continue
line_tokens = line.split("\t")
if len(line_tokens) <= 1:
continue
data.append({
"uid": f"qnli-{file_path}-{data_type}-{i}",
"sequence_a": line_tokens[1],
"sequence_b": line_tokens[2],
self.class_key: str(line_tokens[-1]),
})
return data
authors: ["humanbrain.djlee@gmail.com"] | author_id: humanbrain.djlee@gmail.com

blob_id: 33ccbd6363bc7859a7fc752f05cad29813f8f354 | directory_id: bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | content_id: 3eaa605aa96b08ee364375160ae8865b257bf882
path: /Tree/binary_tree_paths.py
detected_licenses: [] | license_type: no_license
repo_name: harvi7/Leetcode-Problems-Python | snapshot_id: d3a5e8898aceb11abc4cae12e1da50061c1d352c | revision_id: 73adc00f6853e821592c68f5dddf0a823cce5d87 | branch_name: refs/heads/master
visit_date: 2023-05-11T09:03:03.181590 | revision_date: 2023-04-29T22:03:41 | committer_date: 2023-04-29T22:03:41
github_id: 222,657,838 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 482 | extension: py
content:
class Solution:
def binaryTreePaths(self, root: TreeNode) -> List[str]:
paths = []
if not root: return paths
self.dfs(root, "", paths)
return paths
def dfs(self, root, path, paths):
path = path + str(root.val)
if not root.left and not root.right:
paths.append(path)
return
if root.left: self.dfs(root.left, path + "->", paths)
if root.right: self.dfs(root.right, path+ "->", paths)
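`TreeNode` and `List` are supplied by the LeetCode judge, so the file above does not run standalone; because Python evaluates these annotations at class-definition time, minimal definitions must precede `Solution` in a standalone script. A sketch (the `TreeNode` shape is an assumption matching LeetCode's convention):

```python
from typing import List

class TreeNode:
    # Minimal stand-in for LeetCode's TreeNode (assumed shape).
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# ... paste the Solution class from above here ...

# Example: the tree 1 -> (2 -> (., 5), 3) yields ['1->2->5', '1->3'].
root = TreeNode(1, TreeNode(2, None, TreeNode(5)), TreeNode(3))
print(Solution().binaryTreePaths(root))
```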
authors: ["iamharshvirani7@gmail.com"] | author_id: iamharshvirani7@gmail.com

blob_id: 5e7794458fd0973c267d8c6df2b78d63abcb4d98 | directory_id: be9960512ddf562516c4f2d909577fc6b9750f19 | content_id: 80997d750eb1d9a781847a4872c1e3298f7df9e4
path: /packages/jet_bridge_base/jet_bridge_base/filters/filter_class.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: timgates42/jet-bridge | snapshot_id: 9abdc8bdf420c720a30d6db163649a2a74c6b829 | revision_id: 80c1f3a96dc467fd8c98cbdfbda2e42aa6a1d3b4 | branch_name: refs/heads/master
visit_date: 2023-03-16T03:19:08.358590 | revision_date: 2022-06-14T16:26:14 | committer_date: 2022-06-14T16:26:14
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,993 | extension: py
content:
from jet_bridge_base.utils.queryset import get_session_engine
from sqlalchemy import inspect
from jet_bridge_base.filters import lookups
from jet_bridge_base.filters.filter import Filter
from jet_bridge_base.filters.filter_for_dbfield import filter_for_data_type
class FilterClass(object):
filters = []
def __init__(self, *args, **kwargs):
self.meta = getattr(self, 'Meta', None)
if 'context' in kwargs:
self.handler = kwargs['context'].get('handler', None)
self.update_filters()
def update_filters(self):
filters = []
if self.meta:
if hasattr(self.meta, 'model'):
Model = self.meta.model
mapper = inspect(Model)
columns = mapper.columns
if hasattr(self.meta, 'fields'):
columns = filter(lambda x: x.name in self.meta.fields, columns)
for column in columns:
item = filter_for_data_type(column.type)
for lookup in item['lookups']:
for exclude in [False, True]:
instance = item['filter_class'](
name=column.key,
column=column,
lookup=lookup,
exclude=exclude
)
filters.append(instance)
declared_filters = filter(lambda x: isinstance(x[1], Filter), map(lambda x: (x, getattr(self, x)), dir(self)))
for filter_name, filter_item in declared_filters:
filter_item.name = filter_name
filter_item.model = Model
filter_item.handler = self.handler
filters.append(filter_item)
self.filters = filters
def filter_queryset(self, request, queryset):
session = request.session
def get_filter_value(name, filters_instance=None):
value = request.get_argument_safe(name, None)
if filters_instance and value is not None and get_session_engine(session) == 'bigquery':
python_type = filters_instance.column.type.python_type
value = python_type(value)
return value
for item in self.filters:
if item.name:
argument_name = '{}__{}'.format(item.name, item.lookup)
if item.exclude:
argument_name = 'exclude__{}'.format(argument_name)
value = get_filter_value(argument_name, item)
if value is None and item.lookup == lookups.DEFAULT_LOOKUP:
argument_name = item.name
if item.exclude:
argument_name = 'exclude__{}'.format(argument_name)
value = get_filter_value(argument_name, item)
else:
value = None
queryset = item.filter(queryset, value)
return queryset
authors: ["f1nal@cgaming.org"] | author_id: f1nal@cgaming.org

blob_id: 0b33acfc0a8e92200b6bb9ff425ee92732e063f5 | directory_id: de24f83a5e3768a2638ebcf13cbe717e75740168 | content_id: 5a721a4808aefc5e4ee7ce452cf07957f8fd7305
path: /moodledata/vpl_data/24/usersdata/146/11428/submittedfiles/av1_m3.py
detected_licenses: [] | license_type: no_license
repo_name: rafaelperazzo/programacao-web | snapshot_id: 95643423a35c44613b0f64bed05bd34780fe2436 | revision_id: 170dd5440afb9ee68a973f3de13a99aa4c735d79 | branch_name: refs/heads/master
visit_date: 2021-01-12T14:06:25.773146 | revision_date: 2017-12-22T16:05:45 | committer_date: 2017-12-22T16:05:45
github_id: 69,566,344 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 330 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import division
import math
m = input('Digite o numero de termos: ')
soma = 0
i = 1
den = 2
while i<=m :
if i%2==0:
soma = soma - 4/((den)*(den+1)*(den+2))
else:
soma = soma + 4/((den+2)*(den+1)*(den+2))
den = den+1
i = i+1
pi = 3+soma
print ('%.6f'%pi)
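The even and odd branches above divide by different leading factors, `(den)` versus `(den+2)`, which reads like a slip in this submitted homework; the series it appears to target is Nilakantha's, where the denominator base advances by 2 each term. A reference sketch:

```python
# Nilakantha series: pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...
def nilakantha(n_terms):
    pi, den, sign = 3.0, 2, 1
    for _ in range(n_terms):
        pi += sign * 4 / (den * (den + 1) * (den + 2))
        den += 2          # denominator base advances by two each term
        sign = -sign      # terms alternate in sign
    return pi

print('%.6f' % nilakantha(1000))  # 3.141593
```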
authors: ["rafael.mota@ufca.edu.br"] | author_id: rafael.mota@ufca.edu.br

blob_id: 1b330ed34ebbd7922f0d6f1b9d56a7a7c71f35db | directory_id: f819fe72c5b18b42a25a71dc2900c7fa80e17811 | content_id: 4de51d4dc7dd8f5b04a87cd41a4ae72ca0ed76a2
path: /pandas/_libs/tslibs/timestamps.pyi
detected_licenses: ["BSD-3-Clause"] | license_type: permissive
repo_name: JMBurley/pandas | snapshot_id: 34d101425acb0ac35a53bcf29fbd47c2d4c88fda | revision_id: b74dc5c077971301c5b9ff577fa362943f3c3a17 | branch_name: refs/heads/master
visit_date: 2022-11-06T00:48:41.465865 | revision_date: 2022-06-13T19:30:11 | committer_date: 2022-06-13T19:30:11
github_id: 229,853,377 | star_events_count: 1 | fork_events_count: 0
gha_license_id: BSD-3-Clause | gha_event_created_at: 2019-12-24T02:11:54 | gha_created_at: 2019-12-24T02:11:53 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,504 | extension: pyi
content:
from datetime import (
date as _date,
datetime,
time as _time,
timedelta,
tzinfo as _tzinfo,
)
from time import struct_time
from typing import (
ClassVar,
TypeVar,
overload,
)
import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
Period,
Tick,
Timedelta,
)
_DatetimeT = TypeVar("_DatetimeT", bound=datetime)
def integer_op_not_supported(obj: object) -> TypeError: ...
class Timestamp(datetime):
min: ClassVar[Timestamp]
max: ClassVar[Timestamp]
resolution: ClassVar[Timedelta]
value: int # np.int64
def __new__(
cls: type[_DatetimeT],
ts_input: int
| np.integer
| float
| str
| _date
| datetime
| np.datetime64 = ...,
freq: int | None | str | BaseOffset = ...,
tz: str | _tzinfo | None | int = ...,
unit: str | int | None = ...,
year: int | None = ...,
month: int | None = ...,
day: int | None = ...,
hour: int | None = ...,
minute: int | None = ...,
second: int | None = ...,
microsecond: int | None = ...,
nanosecond: int | None = ...,
tzinfo: _tzinfo | None = ...,
*,
fold: int | None = ...,
) -> _DatetimeT: ...
# GH 46171
# While Timestamp can return pd.NaT, having the constructor return
# a Union with NaTType makes things awkward for users of pandas
def _set_freq(self, freq: BaseOffset | None) -> None: ...
@classmethod
def _from_value_and_reso(
cls, value: int, reso: int, tz: _tzinfo | None
) -> Timestamp: ...
@property
def year(self) -> int: ...
@property
def month(self) -> int: ...
@property
def day(self) -> int: ...
@property
def hour(self) -> int: ...
@property
def minute(self) -> int: ...
@property
def second(self) -> int: ...
@property
def microsecond(self) -> int: ...
@property
def tzinfo(self) -> _tzinfo | None: ...
@property
def tz(self) -> _tzinfo | None: ...
@property
def fold(self) -> int: ...
@classmethod
def fromtimestamp(
cls: type[_DatetimeT], t: float, tz: _tzinfo | None = ...
) -> _DatetimeT: ...
@classmethod
def utcfromtimestamp(cls: type[_DatetimeT], t: float) -> _DatetimeT: ...
@classmethod
def today(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ...
@classmethod
def fromordinal(
cls: type[_DatetimeT],
ordinal: int,
freq: str | BaseOffset | None = ...,
tz: _tzinfo | str | None = ...,
) -> _DatetimeT: ...
@classmethod
def now(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ...
@classmethod
def utcnow(cls: type[_DatetimeT]) -> _DatetimeT: ...
# error: Signature of "combine" incompatible with supertype "datetime"
@classmethod
def combine(cls, date: _date, time: _time) -> datetime: ... # type: ignore[override]
@classmethod
def fromisoformat(cls: type[_DatetimeT], date_string: str) -> _DatetimeT: ...
def strftime(self, format: str) -> str: ...
def __format__(self, fmt: str) -> str: ...
def toordinal(self) -> int: ...
def timetuple(self) -> struct_time: ...
def timestamp(self) -> float: ...
def utctimetuple(self) -> struct_time: ...
def date(self) -> _date: ...
def time(self) -> _time: ...
def timetz(self) -> _time: ...
def replace(
self: _DatetimeT,
year: int = ...,
month: int = ...,
day: int = ...,
hour: int = ...,
minute: int = ...,
second: int = ...,
microsecond: int = ...,
tzinfo: _tzinfo | None = ...,
fold: int = ...,
) -> _DatetimeT: ...
def astimezone(self: _DatetimeT, tz: _tzinfo | None = ...) -> _DatetimeT: ...
def ctime(self) -> str: ...
def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
@classmethod
def strptime(cls, date_string: str, format: str) -> datetime: ...
def utcoffset(self) -> timedelta | None: ...
def tzname(self) -> str | None: ...
def dst(self) -> timedelta | None: ...
def __le__(self, other: datetime) -> bool: ... # type: ignore[override]
def __lt__(self, other: datetime) -> bool: ... # type: ignore[override]
def __ge__(self, other: datetime) -> bool: ... # type: ignore[override]
def __gt__(self, other: datetime) -> bool: ... # type: ignore[override]
# error: Signature of "__add__" incompatible with supertype "date"/"datetime"
@overload # type: ignore[override]
def __add__(self, other: np.ndarray) -> np.ndarray: ...
@overload
def __add__(
self: _DatetimeT, other: timedelta | np.timedelta64 | Tick
) -> _DatetimeT: ...
def __radd__(self: _DatetimeT, other: timedelta) -> _DatetimeT: ...
@overload # type: ignore[override]
def __sub__(self, other: datetime) -> Timedelta: ...
@overload
def __sub__(
self: _DatetimeT, other: timedelta | np.timedelta64 | Tick
) -> _DatetimeT: ...
def __hash__(self) -> int: ...
def weekday(self) -> int: ...
def isoweekday(self) -> int: ...
def isocalendar(self) -> tuple[int, int, int]: ...
@property
def is_leap_year(self) -> bool: ...
@property
def is_month_start(self) -> bool: ...
@property
def is_quarter_start(self) -> bool: ...
@property
def is_year_start(self) -> bool: ...
@property
def is_month_end(self) -> bool: ...
@property
def is_quarter_end(self) -> bool: ...
@property
def is_year_end(self) -> bool: ...
def to_pydatetime(self, warn: bool = ...) -> datetime: ...
def to_datetime64(self) -> np.datetime64: ...
def to_period(self, freq: BaseOffset | str | None = ...) -> Period: ...
def to_julian_date(self) -> np.float64: ...
@property
def asm8(self) -> np.datetime64: ...
def tz_convert(self: _DatetimeT, tz: _tzinfo | str | None) -> _DatetimeT: ...
# TODO: could return NaT?
def tz_localize(
self: _DatetimeT,
tz: _tzinfo | str | None,
ambiguous: str = ...,
nonexistent: str = ...,
) -> _DatetimeT: ...
def normalize(self: _DatetimeT) -> _DatetimeT: ...
# TODO: round/floor/ceil could return NaT?
def round(
self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ...
) -> _DatetimeT: ...
def floor(
self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ...
) -> _DatetimeT: ...
def ceil(
self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ...
) -> _DatetimeT: ...
def day_name(self, locale: str | None = ...) -> str: ...
def month_name(self, locale: str | None = ...) -> str: ...
@property
def day_of_week(self) -> int: ...
@property
def dayofweek(self) -> int: ...
@property
def day_of_month(self) -> int: ...
@property
def day_of_year(self) -> int: ...
@property
def dayofyear(self) -> int: ...
@property
def quarter(self) -> int: ...
@property
def week(self) -> int: ...
def to_numpy(
self, dtype: np.dtype | None = ..., copy: bool = ...
) -> np.datetime64: ...
@property
def _date_repr(self) -> str: ...
@property
def days_in_month(self) -> int: ...
@property
def daysinmonth(self) -> int: ...
def _as_unit(self, unit: str, round_ok: bool = ...) -> Timestamp: ...
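A small illustration of the arithmetic overloads declared above, assuming pandas is installed: adding a `timedelta` preserves the `Timestamp` type, while subtracting another datetime yields a `Timedelta`.

```python
import pandas as pd
from datetime import timedelta

ts = pd.Timestamp("2022-06-13")
print(type(ts + timedelta(days=1)))           # Timestamp (the _DatetimeT overload)
print(type(ts - pd.Timestamp("2022-06-12")))  # Timedelta (the datetime overload)
```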
authors: ["noreply@github.com"] | author_id: JMBurley.noreply@github.com

blob_id: 4c011ed2da70f655d3aa386df74a1e326d494d21 | directory_id: 1ada3010856e39c93e2483c960aa8fc25e2b3332 | content_id: 034233595a63dbed3ebcff43dae5e8c12860eb95
path: /Binary Tree/FullBT.py
detected_licenses: [] | license_type: no_license
repo_name: Taoge123/LeetCode | snapshot_id: 4f9e26be05f39b37bdbb9c1e75db70afdfa1b456 | revision_id: 4877e35a712f59bc7b8fffa3d8af2ffa56adb08c | branch_name: refs/heads/master
visit_date: 2022-02-24T20:09:21.149818 | revision_date: 2020-07-31T03:18:05 | committer_date: 2020-07-31T03:18:05
github_id: 142,700,689 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 965 | extension: py
content:
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def isFullTree(root):
if root is None:
return True
#If leaf node
if root.left is None and root.right is None:
return True
if root.left is not None and root.right is not None:
return (isFullTree(root.left) and isFullTree(root.right))
return False
root = Node(10);
root.left = Node(20);
root.right = Node(30);
root.left.right = Node(40);
root.left.left = Node(50);
root.right.left = Node(60);
root.right.right = Node(70);
root.left.left.left = Node(80);
root.left.left.right = Node(90);
root.left.right.left = Node(80);
root.left.right.right = Node(90);
root.right.left.left = Node(80);
root.right.left.right = Node(90);
root.right.right.left = Node(80);
# root.right.right.right = Node(90);
if isFullTree(root):
print("The Binary tree is full")
else:
print("Binary tree is not full")
authors: ["taocheng984@gmail.com"] | author_id: taocheng984@gmail.com

blob_id: ccce9383b7b87da27d4a190b8045a110fb1240bc | directory_id: 5a42ce780721294d113335712d45c62a88725109 | content_id: 67982f7aa800805ca33b77737fa43bda3887f4af
path: /project/pyalg_api/commands/orient_command.py
detected_licenses: [] | license_type: no_license
repo_name: P79N6A/project_code | snapshot_id: d2a933d53deb0b4e0bcba97834de009e7bb78ad0 | revision_id: 1b0e863ff3977471f5a94ef7d990796a9e9669c4 | branch_name: refs/heads/master
visit_date: 2020-04-16T02:06:57.317540 | revision_date: 2019-01-11T07:02:05 | committer_date: 2019-01-11T07:02:05
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 746 | extension: py
content:
# -*- coding: utf-8 -*-
import json
import os
import pandas as pd
import pyorient
import pdb
import random
import re
from datetime import datetime, timedelta
from lib.logger import logger
from .base_command import BaseCommand
from model.open import OpenJxlStat
from model.antifraud import AfWsm
from module.yiyiyuan import YiUserRemitList
from module.yiyiyuan import YiFavoriteContact
from module.detail import Detail
from lib.ssdb_config import SsdbConfig
class OrientCommand(BaseCommand):
def __init__(self):
super(OrientCommand, self).__init__()
self.client = None
# ORIENT DB
def runorient(self,start_time = None , end_time = None):
# Connect to the database
self.contactOrient()
return True
authors: ["wangyongqiang@ihsmf.com"] | author_id: wangyongqiang@ihsmf.com

blob_id: b94122e661a89d99930688d0073fd4d58d5439c7 | directory_id: f000fa4e6ef1de9591eeabff43ba57b7bf32561d | content_id: 98322faa2514b82357002f20746813ca09c7ca49
path: /cephlcm/api/views/v1/permission.py
detected_licenses: [] | license_type: no_license
repo_name: VictorDenisov/ceph-lcm | snapshot_id: 1aca07f2d17bfda8760d192ffd6d17645705b6e4 | revision_id: 3cfd9ced6879fca1c39039e195d22d897ddcde80 | branch_name: refs/heads/master
visit_date: 2021-01-15T09:19:23.723613 | revision_date: 2016-09-17T01:18:45 | committer_date: 2016-09-17T01:18:45
github_id: 68,424,913 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2016-09-17T01:17:36 | gha_created_at: 2016-09-17T01:17:36 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 502 | extension: py
content:
# -*- coding: utf-8 -*-
"""Small API to list permissions available in application."""
from cephlcm.api import auth
from cephlcm.api.views import generic
from cephlcm.common.models import role
class PermissionView(generic.ModelView):
decorators = [
auth.require_authorization("api", "view_role"),
auth.require_authentication
]
NAME = "permission"
ENDPOINT = "/permission/"
def get(self):
return role.PermissionSet(role.PermissionSet.KNOWN_PERMISSIONS)
authors: ["sarkhipov@mirantis.com"] | author_id: sarkhipov@mirantis.com

blob_id: 5a5e361c2eba01070a0d3e17a30153188ba0779f | directory_id: 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | content_id: 19cf334e308e193d4391dbf7c5b6693f27dd4bcc
path: /huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/list_value_list_response.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: jaminGH/huaweicloud-sdk-python-v3 | snapshot_id: eeecb3fb0f3396a475995df36d17095038615fba | revision_id: 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | branch_name: refs/heads/master
visit_date: 2023-06-18T11:49:13.958677 | revision_date: 2021-07-16T07:57:47 | committer_date: 2021-07-16T07:57:47
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,469 | extension: py
content:
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListValueListResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'total': 'int',
'items': 'list[ValueList]'
}
attribute_map = {
'total': 'total',
'items': 'items'
}
def __init__(self, total=None, items=None):
"""ListValueListResponse - a model defined in huaweicloud sdk"""
super(ListValueListResponse, self).__init__()
self._total = None
self._items = None
self.discriminator = None
if total is not None:
self.total = total
if items is not None:
self.items = items
@property
def total(self):
"""Gets the total of this ListValueListResponse.
Number of entries in the reference table
:return: The total of this ListValueListResponse.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListValueListResponse.
Number of entries in the reference table
:param total: The total of this ListValueListResponse.
:type: int
"""
self._total = total
@property
def items(self):
"""Gets the items of this ListValueListResponse.
List of reference tables
:return: The items of this ListValueListResponse.
:rtype: list[ValueList]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ListValueListResponse.
List of reference tables
:param items: The items of this ListValueListResponse.
:type: list[ValueList]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListValueListResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
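A minimal usage sketch, assuming the `huaweicloudsdkcore` package (and `simplejson`) are installed: `to_dict()` walks `openapi_types` and serializes each attribute in turn.

```python
resp = ListValueListResponse(total=1, items=None)
print(resp.to_dict())   # {'total': 1, 'items': None}
print(resp.to_str())    # the same dict rendered as JSON via simplejson
```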
authors: ["hwcloudsdk@huawei.com"] | author_id: hwcloudsdk@huawei.com

blob_id: 8f39586d925238cc0f8aaf1533ed11e6bba15271 | directory_id: 88b4b883c1a262b5f9ca2c97bf1835d6d73d9f0b | content_id: d2cfe91a54afb21b110f70ed0b308b85d4c67660
path: /src/api/python/hce/app/UrlNormalize.py
detected_licenses: [] | license_type: no_license
repo_name: hce-project/hce-bundle | snapshot_id: 2f93dc219d717b9983c4bb534884e4a4b95e9b7b | revision_id: 856a6df2acccd67d7af640ed09f05b2c99895f2e | branch_name: refs/heads/master
visit_date: 2021-09-07T22:55:20.964266 | revision_date: 2018-03-02T12:00:42 | committer_date: 2018-03-02T12:00:42
github_id: 104,993,955 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,593 | extension: py
content:
# coding: utf-8
"""
HCE project, Python bindings, Distributed Tasks Manager application.
The UrlNormalize class implements the main functionality supporting the URL_NORMALIZE properties.
@package: app
@file UrlNormalize.py
@author Alexander Vybornyh <alexander.hce.cluster@gmail.com>
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2017 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
"""
import re
import app.Consts as APP_CONSTS
import app.Utils as Utils
class UrlNormalize(object):
# Constants used in class
PROPERTY_OPTIONS_MASK = 'mask'
PROPERTY_OPTIONS_REPLACE = 'replace'
# Constants of error messages
ERROR_MSG_FAILED_REPLACE = "Operation replace failed. Error: %s"
# Initialization
def __init__(self):
pass
## get normalize mask
#
# @param siteProperties - site properties
# @param defaultValue - default value
# @return normalize mask
@staticmethod
def getNormalizeMask(siteProperties, defaultValue=Utils.UrlNormalizator.NORM_DEFAULT):
# variable for result
ret = defaultValue
if siteProperties is not None and isinstance(siteProperties, dict) and APP_CONSTS.URL_NORMALIZE in siteProperties and \
isinstance(siteProperties[APP_CONSTS.URL_NORMALIZE], dict) and UrlNormalize.PROPERTY_OPTIONS_MASK in siteProperties[APP_CONSTS.URL_NORMALIZE]:
ret = int(siteProperties[APP_CONSTS.URL_NORMALIZE][UrlNormalize.PROPERTY_OPTIONS_MASK])
return ret
## execute normalization of a URL string using a base URL
#
# @param siteProperties - site properties
# @param base - base url string
# @param url - url string
# @param supportProtocols - support protocol list
# @param log - logger instance
# @return already normalized url string or None - in case of bad result normalization
@staticmethod
def execute(siteProperties, base, url, supportProtocols=None, log=None):
# check site property for exist replace rule
if siteProperties is not None and isinstance(siteProperties, dict) and APP_CONSTS.URL_NORMALIZE in siteProperties:
if log is not None:
log.info("!!! siteProperties['%s']: '%s', type: %s", str(APP_CONSTS.URL_NORMALIZE), str(siteProperties[APP_CONSTS.URL_NORMALIZE]),
str(type(siteProperties[APP_CONSTS.URL_NORMALIZE])))
replaceList = []
propertyDict = {}
if isinstance(siteProperties[APP_CONSTS.URL_NORMALIZE], basestring):
propertyDict = Utils.jsonLoadsSafe(jsonString=siteProperties[APP_CONSTS.URL_NORMALIZE], default=propertyDict, log=log)
if isinstance(propertyDict, dict) and UrlNormalize.PROPERTY_OPTIONS_REPLACE in propertyDict:
replaceList = propertyDict[UrlNormalize.PROPERTY_OPTIONS_REPLACE]
if log is not None:
log.debug("!!! replaceList: %s", str(replaceList))
if isinstance(replaceList, list):
for replaceElem in replaceList:
if isinstance(replaceElem, dict):
for pattern, repl in replaceElem.items():
try:
if log is not None:
log.debug("!!! pattern: %s, url: %s", str(pattern), str(url))
url = re.sub(pattern=pattern, repl=repl, string=url, flags=re.U + re.I)
if log is not None:
log.debug("!!! res url: %s", str(url))
except Exception, err:
if log is not None:
log.error(UrlNormalize.ERROR_MSG_FAILED_REPLACE, str(err))
return Utils.urlNormalization(base=base, url=url, supportProtocols=supportProtocols, log=log)
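For orientation, a hypothetical shape of the `URL_NORMALIZE` site property that `execute()` consumes when it arrives as a JSON string: a `replace` list of `{pattern: replacement}` rules applied with `re.sub`. The rule below is illustrative only, not taken from the project.

```python
siteProperties = {
    "URL_NORMALIZE": '{"replace": [{"utm_[^&]+&?": ""}]}'
}
# execute() json-loads the string, then for each rule runs
# re.sub(pattern=pattern, repl=repl, string=url, flags=re.U + re.I).
# getNormalizeMask(), by contrast, reads the dict form {"mask": <int>}.
```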
authors: ["bgv@bgv-d9"] | author_id: bgv@bgv-d9

blob_id: 3745a3536c649d6903183d1fd0fc4de53df98f5c | directory_id: c9a222631e4a0b827ee4efbd4e362d00b7cc6d48 | content_id: 837c42354e6f408ee364e1570706b3fe5d0bab0e
path: /demo/画方格/rose.py
detected_licenses: [] | license_type: no_license
repo_name: enticejin/python | snapshot_id: d86b1727048bae24bce0fedc911953a20d11947c | revision_id: 09dea6c62e6be8389fb23f472a1f02896a74c696 | branch_name: refs/heads/master
visit_date: 2023-03-12T18:00:06.322335 | revision_date: 2021-11-09T01:38:13 | committer_date: 2021-11-09T01:38:13
github_id: 234,876,815 | star_events_count: 3 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2023-03-04T01:24:08 | gha_created_at: 2020-01-19T09:54:07 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,437 | extension: py
content:
from turtle import *
import time
setup(1000,800,0,0)
speed(0)
penup()
seth(90)
fd(340)
seth(0)
pendown()
speed(5)
begin_fill()
fillcolor('red')
circle(50,30)
for i in range(10):
fd(1)
left(10)
circle(40,40)
for i in range(6):
fd(1)
left(3)
circle(80,40)
for i in range(20):
fd(0.5)
left(5)
circle(80,45)
for i in range(10):
fd(2)
left(1)
circle(80,25)
for i in range(20):
fd(1)
left(4)
circle(50,50)
time.sleep(0.1)
circle(120,55)
speed(0)
seth(-90)
fd(70)
right(150)
fd(20)
left(140)
circle(140,90)
left(30)
circle(160,100)
left(130)
fd(25)
penup()
right(150)
circle(40,80)
pendown()
left(115)
fd(60)
penup()
left(180)
fd(60)
pendown()
end_fill()
right(120)
circle(-50,50)
circle(-20,90)
speed(1)
fd(75)
speed(0)
circle(90,110)
penup()
left(162)
fd(185)
left(170)
pendown()
circle(200,10)
circle(100,40)
circle(-52,115)
left(20)
circle(100,20)
circle(300,20)
speed(1)
fd(250)
penup()
speed(0)
left(180)
fd(250)
circle(-300,7)
right(80)
circle(200,5)
pendown()
left(60)
begin_fill()
fillcolor('green')
circle(-80,100)
right(90)
fd(10)
left(20)
circle(-63,127)
end_fill()
penup()
left(50)
fd(20)
left(180)
pendown()
circle(200,25)
penup()
right(150)
fd(180)
right(40)
pendown()
begin_fill()
fillcolor('green')
circle(-100,80)
right(150)
fd(10)
left(60)
circle(-80,98)
end_fill()
penup()
left(60)
fd(13)
left(180)
pendown()
speed(1)
circle(-200,23)
exitonclick()
authors: ["www.403367632@qq.com"] | author_id: www.403367632@qq.com

blob_id: 353284559c12cf179d807a4ba9ac588bd8f495d5 | directory_id: 163bbb4e0920dedd5941e3edfb2d8706ba75627d | content_id: f1c8a2c861bf977ab43894cd1ef420e8d96bcd4a
path: /Code/CodeRecords/2533/59137/270093.py
detected_licenses: [] | license_type: no_license
repo_name: AdamZhouSE/pythonHomework | snapshot_id: a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | revision_id: ffc5606817a666aa6241cfab27364326f5c066ff | branch_name: refs/heads/master
visit_date: 2022-11-24T08:05:22.122011 | revision_date: 2020-07-28T16:21:24 | committer_date: 2020-07-28T16:21:24
github_id: 259,576,640 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 221 | extension: py
content:
def s1():
array = list(eval(input()))
ans = []
for n in array:
if n % 2 == 0:
ans.append(n)
for n in array:
if n % 2 == 1:
ans.append(n)
print(ans)
s1()
authors: ["1069583789@qq.com"] | author_id: 1069583789@qq.com

blob_id: f142571b2aee06f9277b92ef1709a27a21e74f6a | directory_id: f6290b7b8ffb263b7f0d252a67e2c6320a4c1143 | content_id: f7cc926fed7bed41572c85590758f250caa32735
path: /Recursion/rat_in_a_maze.py
detected_licenses: [] | license_type: no_license
repo_name: datAnir/GeekForGeeks-Problems | snapshot_id: b45b0ae80053da8a1b47a2af06e688081574ef80 | revision_id: c71f11d0349ed3850dfaa9c7a078ee70f67e46a1 | branch_name: refs/heads/master
visit_date: 2023-05-29T15:21:59.680793 | revision_date: 2020-12-15T04:55:01 | committer_date: 2020-12-15T04:55:01
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,655 | extension: py
content:
'''
https://practice.geeksforgeeks.org/problems/rat-in-a-maze-problem/1
Consider a rat placed at (0, 0) in a square matrix of order N*N. It has to reach the destination at (n-1, n-1).
Find all possible paths that the rat can take to reach from source to destination. The directions in which the rat can move are 'U'(up), 'D'(down), 'L' (left), 'R' (right).
Value 0 at a cell in the matrix represents that it is blocked and cannot be crossed while value 1 at a cell in the matrix represents that it can be travelled through.
Expected Time Complexity: O((N2)4).
Expected Auxiliary Space: O(L*X), L = length of the path, X = number of paths.
Input:
3
4
1 0 0 0 1 1 0 1 0 1 0 0 0 1 1 1
4
1 0 0 0 1 1 0 1 1 1 0 0 0 1 1 1
2
1 0 1 0
Output:
DRDDRR
DDRDRR DRDDRR
-1
'''
# if row or col goes out of bound or arr[r][c] = -1(visited) or arr[r][c] = 0(blocked), then base cond hit so return
# if we reach at bottom right corner, then print path and return
# else change arr[i][j] = -1(visited) and call in all 4 directions
# after processing all 4 directions, make arr[i][j] = 1 again so that same cell can be used by other path
def ratMaze(arr, i, j, path, ans):
if i < 0 or j < 0 or i >= len(arr) or j >= len(arr) or arr[i][j] <= 0:
return
if i == len(arr)-1 and j == len(arr)-1:
ans.append(path)
return
arr[i][j] = -1
ratMaze(arr, i-1, j, path + 'U', ans)
ratMaze(arr, i+1, j, path + 'D', ans)
ratMaze(arr, i, j-1, path + 'L', ans)
ratMaze(arr, i, j+1, path + 'R', ans)
arr[i][j] = 1
def findPath(arr, n):
ans = []
ratMaze(arr, 0, 0, '', ans)
return ' '.join(sorted(ans))
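A hedged driver for the sample I/O in the docstring (the input parsing is an assumption about the judge's format): build the n*n grid, then print the sorted paths, or -1 when `findPath` returns an empty string.

```python
def run_case(n, flat):
    grid = [flat[i * n:(i + 1) * n] for i in range(n)]
    out = findPath(grid, n)
    print(out if out else -1)

run_case(2, [1, 0, 1, 0])  # -1: the destination cell is blocked
```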
authors: ["komalbansal97@gmail.com"] | author_id: komalbansal97@gmail.com

blob_id: 5bbcbb82c02d8c1cfa203245472a07dafc8af5ca | directory_id: 81f7f4a65a068ed2483b537f6675a5f46235af88 | content_id: b6cd2b3e850c994e4c20377bdb3795bf119cb6be
path: /inplace_activity_stream/urls.py
detected_licenses: ["BSD-2-Clause", "BSD-3-Clause"] | license_type: permissive
repo_name: ebrelsford/django-inplace-activity-stream | snapshot_id: f561c7cf0c7180426d3ea9cd11abba1cb6744e60 | revision_id: a495e42ffdc37d5e800f71ab97ed6975a1849224 | branch_name: refs/heads/master
visit_date: 2020-12-24T13:16:17.119724 | revision_date: 2017-05-09T14:09:09 | committer_date: 2017-05-09T14:09:09
github_id: 11,783,325 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 320 | extension: py
content:
from django.conf.urls import url
from .views import PlaceActivityFeed, PlaceActivityListView
urlpatterns = [
url(r'^feeds/all/$',
PlaceActivityFeed(),
name='activitystream_feed',
),
url(r'^',
PlaceActivityListView.as_view(),
name='activitystream_activity_list'
),
]
authors: ["ebrelsford@gmail.com"] | author_id: ebrelsford@gmail.com

blob_id: 8cb1198739853e29689237999b1e3c3375af0e06 | directory_id: 31e00afe8f782bd214f8e32949be928a51e5de39 | content_id: a56e2de2966fd6bf16c2ab712fd91ba2d6ad1650
path: /CreditCalculator/Credit Calculator/task/creditcalc/creditcalc.py
detected_licenses: [] | license_type: no_license
repo_name: akocur/education | snapshot_id: 65e2a0640bab5e9939c5692333fa2f500c9feb0b | revision_id: d0a890861cd83dcc61ff2af6cfbb75c157fbaf02 | branch_name: refs/heads/master
visit_date: 2023-05-24T02:38:04.695665 | revision_date: 2020-10-26T14:09:07 | committer_date: 2020-10-26T14:09:07
github_id: 269,410,671 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2023-05-22T22:47:45 | gha_created_at: 2020-06-04T16:32:11 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,761 | extension: py
content:
from math import log, ceil, floor
import argparse
class CreditCalculator:
def __init__(self, type_calc='annuity', payment=0, periods=0, interest=0, principal=0):
self.type = type_calc
self.payment = payment
self.periods = periods
self.interest = interest
self.principal = principal
self.interest_rate = self.interest / 1200
self.total_payments = 0
def __repr__(self):
return f'''
type: {self.type}
payment: {self.payment}
periods: {self.periods}
interest: {self.interest}
principal: {self.principal}
interest_rate: {self.interest_rate}
total_payments: {self.total_payments}
'''
def overpayment(self):
print(f'\nOverpayment = {self.total_payments - self.principal}')
def are_errors(self):
if self.type not in ['annuity', 'diff']:
return True
if self.interest <= 0 or self.payment < 0 or self.periods < 0 or self.principal < 0:
return True
if self.type == 'annuity':
if self.payment == self.periods == self.principal == 0:
return True
else:
if self.payment:
return True
def calculate(self):
if self.are_errors():
print('Incorrect parameters')
return
if self.type == 'annuity':
if self.principal == 0:
self.principal = floor(self.payment * (pow(1 + self.interest_rate, self.periods) - 1) /
(self.interest_rate * pow(1 + self.interest_rate, self.periods)))
print(f'Your credit principal = {self.principal}!')
elif self.payment == 0:
self.payment = ceil(self.principal * self.interest_rate * pow(1 + self.interest_rate, self.periods) /
(pow(1 + self.interest_rate, self.periods) - 1))
print(f'Your annuity payment = {self.payment}!')
elif self.periods == 0:
self.periods = ceil(log(self.payment / (self.payment - self.interest_rate * self.principal),
1 + self.interest_rate))
n_years = self.periods // 12
n_month = self.periods % 12
n_years_str = '' if n_years < 1 else f'{n_years} year{"s" if n_years > 1 else ""}'
and_str = ' and ' if n_years > 0 and n_month > 0 else ''
n_month_str = '' if n_month < 1 else f'{n_month} month{"s" if n_month > 1 else ""}'
print(f'You need {n_years_str}{and_str}{n_month_str} to repay this credit!')
self.total_payments = self.payment * self.periods
elif self.type == 'diff':
sum_payment = 0
for month in range(1, self.periods + 1):
payment = ceil(self.principal / self.periods + self.interest_rate * (self.principal - self.principal *
(month - 1) / self.periods))
print(f'Month {month}: paid out {payment}')
sum_payment += payment
self.total_payments = sum_payment
self.overpayment()
parser_args = argparse.ArgumentParser()
parser_args.add_argument('--type')
parser_args.add_argument('--payment', type=int, default=0)
parser_args.add_argument('--principal', type=int, default=0)
parser_args.add_argument('--periods', type=int, default=0)
parser_args.add_argument('--interest', type=float, default=0)
args = parser_args.parse_args()
credit_calc = CreditCalculator(args.type, args.payment, args.periods, args.interest, args.principal)
credit_calc.calculate()
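The annuity branch above implements the standard formula A = P * i * (1 + i)^n / ((1 + i)^n - 1), with monthly rate i = interest / 1200. A quick standalone check of that formula with illustrative values:

```python
from math import ceil

principal, interest, periods = 1_000_000, 10.0, 60
i = interest / 1200  # monthly rate from an annual percentage
payment = ceil(principal * i * (1 + i) ** periods / ((1 + i) ** periods - 1))
print(payment)  # monthly annuity payment for these inputs
```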
authors: ["akocur@yandex.ru"] | author_id: akocur@yandex.ru

blob_id: 5b68f9839dd7d14e82c6f535fb0febdf0d995910 | directory_id: 3bae1ed6460064f997264091aca0f37ac31c1a77 | content_id: 3d234d75264afabac9b90bffa6679d4182ed3010
path: /extensions/logserver/ScribedCommand.py
detected_licenses: [] | license_type: no_license
repo_name: racktivity/ext-pylabs-core | snapshot_id: 04d96b80ac1942754257d59e91460c3a141f0a32 | revision_id: 53d349fa6bee0ccead29afd6676979b44c109a61 | branch_name: refs/heads/master
visit_date: 2021-01-22T10:33:18.523799 | revision_date: 2017-06-08T09:09:28 | committer_date: 2017-06-08T09:09:28
github_id: 54,314,984 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2017-06-08T09:09:29 | gha_created_at: 2016-03-20T11:55:01 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,544 | extension: py
content:
from pylabs import q
from pylabs.inifile import IniFile
from pylabs.baseclasses.CommandWrapper import CommandWrapper
from pylabs.enumerators import AppStatusType
import time
class ScribedCommand(CommandWrapper):
"""
A basic ScribedCommandWrapper to start/stop/restart the Scribe server
"""
# def _getPidFile(self):
# return q.system.fs.joinPaths(q.dirs.pidDir, "scribed.pid")
def _getScribedBinary(self):
return q.system.fs.joinPaths(q.dirs.binDir,"scribed")
def _getScribeCTRLBinary(self):
return q.system.fs.joinPaths(q.dirs.binDir, "scribe_ctrl")
def _getDefaultConfigFile(self):
return q.system.fs.joinPaths(q.dirs.cfgDir, 'scribe_logserver.conf')
def _getPort(self):
serverIniFile = IniFile(q.system.fs.joinPaths(q.dirs.cfgDir, 'qconfig', 'logservermain.cfg'))
return serverIniFile.getValue('main', 'port')
def _getStatus(self, port):
#@todo: use the status command instead of the version command to get the server status once the status problem is resolved
command = "%(SCRIBECTRLCommand)s version %(port)s" % {"SCRIBECTRLCommand":self._getScribeCTRLBinary(), "port":port}
exitCode, output = q.system.process.execute(command, dieOnNonZeroExitCode = False, outputToStdout = False)
#status command returns 2 if scribe is alive else returns 3 ?????
if exitCode :
return AppStatusType.HALTED
return AppStatusType.RUNNING
def start(self, configFile = None, timeout = 5):
"""
Start Scribe Server
@param configFile: configuration file for describing the different stores
@type configFile: string
"""
port = self._getPort()
if self._getStatus(port) == AppStatusType.RUNNING:
q.console.echo('Scribe Server on port %s already running'%port)
return
if not configFile:
configFile = self._getDefaultConfigFile()
q.logger.log('Starting scribe server with port %s using config file %s'%(port, configFile), 5)
command = "%(SCRIBEDCommand)s -p %(port)s -c %(configFile)s 2> %(scribeout)s&" % {"SCRIBEDCommand":self._getScribedBinary(), "port": port, "configFile":configFile, 'scribeout': q.system.fs.joinPaths(q.dirs.logDir, 'logserver.out')}
exitCode, output = q.system.process.execute(command, dieOnNonZeroExitCode = False, outputToStdout = True)
t = timeout
started = False
while t>0:
if q.system.process.checkProcess('bin/scribed') == 0:
started = True
break
t = t - 1
time.sleep(1)
if not started:
q.logger.log("Scribe could not be started in %d seconds" % timeout, 8)
raise RuntimeError("Scribe could not be started in %d seconds" % timeout)
q.logger.log('Scribe server on port %s and config file %s started Successfully'%(port, configFile), 3)
q.console.echo("Scribe started successfully.")
def stop(self):
"""
Stop Scribe Server
"""
port = self._getPort()
if self._getStatus(port) == AppStatusType.HALTED:
q.console.echo('Scribe Server on port %s is not running'%port)
return
command = "%(SCRIBECTRLCommand)s stop %(port)s" % {"SCRIBECTRLCommand":self._getScribeCTRLBinary(), "port":port}
exitCode, output = q.system.process.execute(command, dieOnNonZeroExitCode = False, outputToStdout = True)
if exitCode and output:
raise RuntimeError("Scribe could not be stopped. Reason: %s"%output)
q.console.echo("Scribe stopped successfully")
def restart(self):
"""
Restart Scribe Server
"""
self.stop()
self.start()
def getStatus(self):
"""
Check the live status of the scribe server
"""
return self._getStatus(self._getPort())
def getDetailedStatus(self):
"""
Used the status command to get detailed status of the scribe server
"""
command = "%(SCRIBECTRLCommand)s status %(port)s" % {"SCRIBECTRLCommand":self._getScribeCTRLBinary(), "port":self._getPort()}
exitCode, output = q.system.process.execute(command, dieOnNonZeroExitCode = False, outputToStdout = False)
#status command returns 2 if scribe is alive else returns 3 ?????
if exitCode == 3:
return AppStatusType.HALTED
return AppStatusType.RUNNING
authors: ["devnull@localhost"] | author_id: devnull@localhost

blob_id: 370c92a901face3a9787726692e591f2249350aa | directory_id: b5921afe6ea5cd8b3dcfc83147ab5893134a93d0 | content_id: 1834e62f30873b8c7ab9090c22771b0cad15963c
path: /tl/plugs/timeline/klacht.py
detected_licenses: ["LicenseRef-scancode-other-permissive"] | license_type: permissive
repo_name: techdragon/tl | snapshot_id: aaeb46e18849c04ad436e0e786401621a4be82ee | revision_id: 6aba8aeafbc92cabdfd7bec11964f7c3f9cb835d | branch_name: refs/heads/master
visit_date: 2021-01-17T16:13:18.636457 | revision_date: 2012-11-02T10:08:10 | committer_date: 2012-11-02T10:08:10
github_id: 9,296,808 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,237 | extension: py
content:
# tl/plugs/timeline/klacht.py
#
#
""" het timeline klachten command. """
## tl imports
from tl.utils.name import stripname
from tl.lib.datadir import getdatadir
from tl.lib.persist import TimedPersist, PersistCollection
from tl.lib.commands import cmnds
from tl.lib.examples import examples
from tl.id import get_uid, get_id
## basic imports
import logging
import time
import os
## getklachtdir function
def getklachtdir(username):
return os.path.join(getdatadir(), "timeline", stripname(username), "klacht")
## Klacht class
class Klacht(TimedPersist):
def __init__(self, username, klacht, default={}, ddir=None, origin=None, *args, **kwargs):
TimedPersist.__init__(self, username, default=default, ddir=getklachtdir(username), *args, **kwargs)
self.data.klacht = self.data.klacht or klacht or "geen text gegeven"
self.data.username = self.data.username or username or "anon"
self.data.uid = self.data.uid or get_uid(username)
self.data.origin = self.data.origin or origin or get_id()
class Klachten(PersistCollection): pass
## complaint command
def handle_klacht(bot, event):
if not event.rest: event.reply("waar wil je over klagen?") ; return
k = Klacht(event.user.data.name, event.rest)
k.save()
event.reply("klacht is genoteerd op %s" % time.ctime(k.data.created))
cmnds.add("klacht", handle_klacht, ["OPER", "USER", "GUEST"])
examples.add(
"klacht",
"met het klacht commando kan je laten registeren wat je intiept, een overzicht kan je krijgen door het klachten commando te geven",
"klacht die GGZ NHN is maar een rukkerig zooitje proviteurs die betaalt krijgen om nee te zeggen"
)
def handle_klachten(bot, event):
klachten = Klachten(getklachtdir(event.user.data.name))
result = []
for k in klachten.dosort(): result.append("%s - %s" % (k.data.klacht, time.ctime(k.data.created)))
if result: event.reply("klachten van %s: " % event.user.data.name, result, dot="indent", nosort=True)
else: event.reply("ik kan nog geen klachten vinden voor %s" % event.uid)
cmnds.add("klachten", handle_klachten, ["OPER", "USER", "GUEST"])
examples.add("klachten", "laat alle klachten zien", "klachten")
|
[
"feedbackflow@gmail.com"
] |
feedbackflow@gmail.com
|
c830aa73b9ef83c3a14fb5861a563fa13de4758f
|
f2a5311fdca8d71535565e1ec3fc2b79e55ab7aa
|
/main2d.py
|
c35349151b55013dad39d01b3ed3cb0a83619153
|
[] |
no_license
|
xzxzmmnn/pytorch-convcnp
|
59bd9036cea88479862a622749408fd73f9d132b
|
31340d5cf4b537a240075f93c8d4aff6f10d8931
|
refs/heads/master
| 2020-12-28T11:53:13.337832
| 2020-01-01T07:58:27
| 2020-01-01T07:58:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,307
|
py
|
import argparse
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as tf
from torchvision.datasets import MNIST, CIFAR10
from fastprogress import master_bar, progress_bar
from convcnp import ConvCNP2d, channel_last
from visualize import plot_all_2d, convert_tfboard
def train(model, dataloader, optimizer):
model.train()
avg_loss = 0
for index, (I, _) in enumerate(progress_bar(dataloader, parent=args.mb)):
I = I.to(args.device)
optimizer.zero_grad()
pred_dist = model(I)
        loss = -pred_dist.log_prob(channel_last(I)).sum(-1).mean()
loss.backward()
optimizer.step()
avg_loss -= loss.item() * I.size(0)
if index % 10 == 0:
args.mb.child.comment = 'loss={:.3f}'.format(loss.item())
return avg_loss / len(dataloader.dataset)
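# Note (illustrative, not in the original file): the objective minimised in
# train() is the negative log-likelihood of the pixels under the predicted
# distribution, summed over channels and averaged over the batch; avg_loss
# accumulates its negation, which is why the returned scalar is logged as
# train/likelihood rather than as a loss.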
def validate(model, dataloader):
model.eval()
    I, _ = next(iter(dataloader))
I = I.to(args.device)
with torch.no_grad():
Mc, f, dist = model.complete(I)
likelihood = dist.log_prob(channel_last(I)).sum(-1).mean()
rmse = (I - f).pow(2).mean()
image = plot_all_2d(I, Mc, f)
image = convert_tfboard(image)
return likelihood, rmse, image
def main():
if args.dataset == 'mnist':
trainset = MNIST('~/data/mnist', train=True, transform=tf.ToTensor())
testset = MNIST('~/data/mnist', train=False, transform=tf.ToTensor())
cnp = ConvCNP2d(channel=1)
elif args.dataset == 'cifar10':
trainset = CIFAR10('~/data/cifar10', train=True, transform=tf.ToTensor())
testset = CIFAR10('~/data/cifar10', train=False, transform=tf.ToTensor())
cnp = ConvCNP2d(channel=3)
trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=8)
testloader = DataLoader(testset, batch_size=16, shuffle=True)
cnp = cnp.to(args.device)
optimizer = optim.Adam(cnp.parameters(), lr=args.learning_rate)
args.mb = master_bar(range(1, args.epochs + 1))
for epoch in args.mb:
avg_train_loss = train(cnp, trainloader, optimizer)
valid_ll, rmse, image = validate(cnp, testloader)
args.writer.add_scalar('train/likelihood', avg_train_loss, epoch)
args.writer.add_scalar('validate/likelihood', valid_ll, epoch)
args.writer.add_scalar('validate/rmse', rmse, epoch)
args.writer.add_image('validate/image', image, epoch)
torch.save(cnp.state_dict(), filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', '-B', type=int, default=16)
parser.add_argument('--learning-rate', '-LR', type=float, default=5e-4)
parser.add_argument('--epochs', '-E', type=int, default=100)
parser.add_argument('--dataset', '-D', type=str, default='mnist', choices=['mnist', 'cifar10'])
parser.add_argument('--logging', default=False, action='store_true')
args = parser.parse_args()
filename = 'convcnp2d_{}.pth.gz'.format(args.dataset)
if torch.cuda.is_available():
args.device = torch.device('cuda')
else:
args.device = torch.device('cpu')
args.writer = SummaryWriter()
main()
args.writer.close()
|
[
"makoto.kawano@gmail.com"
] |
makoto.kawano@gmail.com
|
74ecd4a6a16f90612dffbb77095aa1099cf71add
|
033b29b6b1538d10e060e5734a1d7488a3fa03b4
|
/attic/objects/cards.py
|
f7891263e43957b288de677edd09d2b095312859
|
[
"MIT"
] |
permissive
|
yuechuanx/fluent-python-code-and-notes
|
f99967416abc9c46be50d95f822b2ef3609f2d2d
|
2ae19fff8e1d292c6e8d163c99ca63e07259499c
|
refs/heads/master
| 2023-08-09T22:14:22.985987
| 2022-08-28T09:06:32
| 2022-08-28T09:06:32
| 229,009,764
| 2
| 0
|
MIT
| 2023-07-20T15:11:59
| 2019-12-19T08:30:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
"""
Spadille is the nickname for the Ace of Spades in some games
(see `Webster 1913`_)
>>> beer_card = Card('7', Suite.diamonds)
>>> beer_card
Card('7', Suite.diamonds)
>>> spadille = Card('A', Suite.spades, long_rank='Ace')
>>> spadille
Card('A', Suite.spades)
>>> print(spadille)
Ace of spades
>>> bytes(spadille)
b'A\\x01'
>>> charles = Card('K', Suite.hearts)
>>> bytes(charles)
b'K\\x04'
>>> big_cassino = Card('10', Suite.diamonds)
>>> bytes(big_cassino)
b'T\\x02'
.. _Webster 1913: http://machaut.uchicago.edu/cgi-bin/WEBSTER.sh?WORD=spadille
"""
from enum import Enum
Suite = Enum('Suite', 'spades diamonds clubs hearts')
class Card:
def __init__(self, rank, suite, *, long_rank=None):
self.rank = rank
if long_rank is None:
self.long_rank = self.rank
else:
self.long_rank = long_rank
self.suite = suite
def __str__(self):
return '{long_rank} of {suite.name}'.format(**self.__dict__)
def __repr__(self):
constructor = '{cls.__name__}({args})'
args = '{0.rank!r}, Suite.{0.suite.name}'.format(self)
return constructor.format(cls=self.__class__, args=args)
def __bytes__(self):
if self.rank == '10':
rank_byte = b'T'
else:
rank_byte = bytes([ord(self.rank)])
return rank_byte + bytes([self.suite.value])
|
[
"xiaoyuechuanz@163.com"
] |
xiaoyuechuanz@163.com
|
8e227b0301c820f7de88c13b8aa9fea0299ee35c
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/lex-models_read_1/intent-version_get.py
|
3042cea737aa3ed08aa65f3330a3dc1253199319
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lex-models/get-intent-versions.html
if __name__ == '__main__':
"""
create-intent-version : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lex-models/create-intent-version.html
delete-intent-version : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lex-models/delete-intent-version.html
"""
parameter_display_string = """
# name : The name of the intent for which versions should be returned.
"""
add_option_dict = {}
#######################################################################
# setting option use
# ex: add_option_dict["setting_matching_parameter"] = "--owners"
# ex: add_option_dict["setting_key"] = "owner_id"
#######################################################################
# single parameter
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
execute_one_parameter("lex-models", "get-intent-versions", "name", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
f6e55ac4950c328a97974e006332ebc0b2d7779d
|
9a4de72aab094c87cfee62380e7f2613545eecfb
|
/monitor/permissions.py
|
4a5ca730834d35cef0d5a306a239cd832e172907
|
[] |
no_license
|
jamesduan/asset
|
ed75765c30a5288aaf4f6c56bbf2c9a059105f29
|
f71cb623b5ba376309cb728ad5c291ced2ee8bfc
|
refs/heads/master
| 2021-01-10T00:06:41.120678
| 2017-05-27T11:40:48
| 2017-05-27T11:40:48
| 92,730,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
from rest_framework import permissions
from assetv2.settingsapi import GROUP_ID
SAFE_METHOD = ['GET']
class EventPermission(permissions.BasePermission):
def has_permission(self, request, view):
return True
class AlarmPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.user is None:
return False
        if request.user.is_superuser or request.method in SAFE_METHOD:
            return True
        return False
|
[
"duanlingxiao@yhd.com"
] |
duanlingxiao@yhd.com
|
93017e77935b215f3fe77019853de5c8710fb5da
|
5ab0a217ac64a4e73d7ccff834a73eecdae323c5
|
/chps3-5/5.09.2.py
|
07db151d59c6c3f2ae3361eb9a29d95d73b0929d
|
[] |
no_license
|
bolducp/My-Think-Python-Solutions
|
d90ea5c485e418f4a6b547fdd9c1d4c8adfe72b9
|
6c411af5a46ee167b8e4a3449aa4b18705bf1df5
|
refs/heads/master
| 2021-01-22T05:19:53.987082
| 2015-09-15T15:53:31
| 2015-09-15T15:53:31
| 41,259,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
def print_it():
print "hello"
def do_n(a_function, n):
if n <= 0:
return
a_function()
do_n(a_function, n-1)
do_n(print_it, 10)
|
[
"paigebolduc@gmail.com"
] |
paigebolduc@gmail.com
|
6f0edfece6342887493bb3cea706740038aa981b
|
9b8e2992a38f591032997b5ced290fe1acc3ad94
|
/assignment.py
|
9f83cc955c406a0296f5a3b75f6f088b6468147c
|
[] |
no_license
|
girishdhegde/aps-2020
|
c694443c10d0d572c8022dad5a6ce735462aaa51
|
fb43d8817ba16ff78f93a8257409d77dbc82ced8
|
refs/heads/master
| 2021-08-08T04:49:18.876187
| 2021-01-02T04:46:20
| 2021-01-02T04:46:20
| 236,218,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
cost = [[3, 2, 7],
[5, 1, 3],
[2, 7, 2]]
n = 3
dp = [float('inf') for i in range(2**n)]
dp[0] = 0
def count_set_bits(mask):
cnt = 0
while(mask != 0):
cnt += 1
mask &= (mask - 1)
return cnt
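# Worked example (illustrative, not in the original file): the Kernighan
# trick `mask &= mask - 1` clears the lowest set bit each iteration, so for
# 0b10110 the loop sees 0b10110 -> 0b10100 -> 0b10000 -> 0b00000 and
# returns 3.
assert count_set_bits(0b10110) == 3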
print(count_set_bits(7))
# for mask in range(2**n):
# x = count_set_bits(mask)
|
[
"girsihdhegde12499@gmail.com"
] |
girsihdhegde12499@gmail.com
|
d0ffc2ce73a4358770fcbd0d3aea0a21813f5eeb
|
005f02cb534bbf91fe634fcf401441e1179365c8
|
/10-Django Level 2/10.2-projektDy/projektDy/wsgi.py
|
18fe901d2208426855c0e488e7e4b4babad70dda
|
[] |
no_license
|
Ruxhino-B/django-deployment-example
|
220a39a456871a1bf42a64fd5b945731056fc7b9
|
e19713ac1e11af202152ad20d7c3c94891a77e83
|
refs/heads/master
| 2020-04-18T02:21:10.505691
| 2020-01-06T14:18:18
| 2020-01-06T14:25:25
| 167,159,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for projektDy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projektDy.settings')
application = get_wsgi_application()
|
[
"ruxhino@gmail.com"
] |
ruxhino@gmail.com
|
03829e363688475ccad8963022ab6bfa1f2ae6ee
|
b47f5ca0a51cf59427b7bd12e9c85064a1e13e03
|
/easyci/commands/watch.py
|
4969cb582bbd2f55f16519477a4ad48cafb03c24
|
[
"MIT"
] |
permissive
|
naphatkrit/easyci
|
a490b57e601bcad6d2022834809dd60cb0902e0c
|
7aee8d7694fe4e2da42ce35b0f700bc840c8b95f
|
refs/heads/master
| 2016-09-02T01:14:28.505230
| 2015-09-09T00:26:25
| 2015-09-09T00:26:25
| 41,396,486
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
import click
from watchdog.observers import Observer
from easyci.file_system_events.tests_event_handler import TestsEventHandler
from easyci.utils import decorators
@click.command()
@click.pass_context
@decorators.print_markers
def watch(ctx):
"""Watch the directory for changes. Automatically run tests.
"""
vcs = ctx.obj['vcs']
event_handler = TestsEventHandler(vcs)
observer = Observer()
observer.schedule(event_handler, vcs.path, recursive=True)
observer.start()
click.echo('Watching directory `{path}`. Use ctrl-c to stop.'.format(path=vcs.path))
    try:
        while observer.is_alive():
            observer.join(timeout=1)
    except KeyboardInterrupt:
        # stop the watchdog observer cleanly on ctrl-c, as the echo above promises
        observer.stop()
        observer.join()
|
[
"naphat.krit@gmail.com"
] |
naphat.krit@gmail.com
|
2fe01f1669e635156e429787ec8e0f24864e4090
|
bd2fb6aa0e25dcc3f6c1511007f15f63d0d9fb55
|
/tests/functions/folding/test_addmod_mulmod.py
|
50aa1d78ec53446c84cb634c1d65af92aea219fc
|
[
"Apache-2.0"
] |
permissive
|
andrelfpinto/vyper
|
4b26a88686518eca3a829c172dd01dcd34b242e4
|
d9b73846aa14a6019faa4126ec7608acd05e480d
|
refs/heads/master
| 2022-11-08T06:38:59.104585
| 2020-06-23T01:13:50
| 2020-06-23T01:13:50
| 274,272,650
| 0
| 0
|
Apache-2.0
| 2020-06-23T00:39:05
| 2020-06-23T00:39:04
| null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
import pytest
from hypothesis import assume, given, settings
from hypothesis import strategies as st
from vyper import ast as vy_ast
from vyper import functions as vy_fn
st_uint256 = st.integers(min_value=0, max_value=2 ** 256 - 1)
@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(a=st_uint256, b=st_uint256, c=st_uint256)
@pytest.mark.parametrize('fn_name', ['uint256_addmod', 'uint256_mulmod'])
def test_modmath(get_contract, a, b, c, fn_name):
assume(c > 0)
source = f"""
@public
def foo(a: uint256, b: uint256, c: uint256) -> uint256:
return {fn_name}(a, b, c)
"""
contract = get_contract(source)
vyper_ast = vy_ast.parse_to_ast(f"{fn_name}({a}, {b}, {c})")
old_node = vyper_ast.body[0].value
new_node = vy_fn.DISPATCH_TABLE[fn_name].evaluate(old_node)
assert contract.foo(a, b, c) == new_node.value
|
[
"ben@hauser.id"
] |
ben@hauser.id
|
08b5a295b6ce6b6b955d67a08448ad20ee18b133
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_frc_identity_documents_async.py
|
d90abd8f45f9a581b5f6a733d8ccab64198aad68
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 8,085
|
py
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from io import BytesIO
from devtools_testutils.aio import recorded_by_proxy_async
from azure.core.exceptions import ServiceRequestError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.v2_1.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_prebuilt_models
from azure.ai.formrecognizer.aio import FormRecognizerClient
from azure.ai.formrecognizer import FormRecognizerApiVersion
from asynctestcase import AsyncFormRecognizerTest
from preparers import FormRecognizerPreparer
from preparers import GlobalClientPreparer as _GlobalClientPreparer
FormRecognizerClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestIdDocumentsAsync(AsyncFormRecognizerTest):
@FormRecognizerPreparer()
async def test_identity_document_bad_endpoint(self, **kwargs):
formrecognizer_test_api_key = kwargs.get("formrecognizer_test_api_key", None)
with open(self.identity_document_license_jpg, "rb") as fd:
my_file = fd.read()
with pytest.raises(ServiceRequestError):
client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key))
async with client:
poller = await client.begin_recognize_identity_documents(my_file)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_damaged_file_bytes_fails_autodetect_content_type(self, **kwargs):
client = kwargs.pop("client")
damaged_pdf = b"\x50\x44\x46\x55\x55\x55" # doesn't match any magic file numbers
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
damaged_pdf
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_damaged_file_bytes_io_fails_autodetect(self, **kwargs):
client = kwargs.pop("client")
damaged_pdf = BytesIO(b"\x50\x44\x46\x55\x55\x55") # doesn't match any magic file numbers
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
damaged_pdf
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_passing_bad_content_type_param_passed(self, **kwargs):
client = kwargs.pop("client")
with open(self.identity_document_license_jpg, "rb") as fd:
my_file = fd.read()
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
my_file,
content_type="application/jpeg"
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_auto_detect_unsupported_stream_content(self, **kwargs):
client = kwargs.pop("client")
with open(self.unsupported_content_py, "rb") as fd:
my_file = fd.read()
with pytest.raises(ValueError):
async with client:
poller = await client.begin_recognize_identity_documents(
my_file
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy_async
async def test_identity_document_stream_transform_jpg(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_id_document = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_id_document)
with open(self.identity_document_license_jpg, "rb") as fd:
my_file = fd.read()
async with client:
poller = await client.begin_recognize_identity_documents(
identity_document=my_file,
include_field_elements=True,
cls=callback
)
result = await poller.result()
raw_response = responses[0]
returned_model = responses[1]
id_document = returned_model[0]
actual = raw_response.analyze_result.document_results[0].fields
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
page_results = raw_response.analyze_result.page_results
self.assertFormFieldsTransformCorrect(id_document.fields, actual, read_results)
# check page range
assert id_document.page_range.first_page_number == document_results[0].page_range[0]
assert id_document.page_range.last_page_number == document_results[0].page_range[1]
# Check page metadata
self.assertFormPagesTransformCorrect(id_document.pages, read_results, page_results)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy_async
async def test_identity_document_jpg_include_field_elements(self, client):
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
async with client:
poller = await client.begin_recognize_identity_documents(id_document, include_field_elements=True)
result = await poller.result()
assert len(result) == 1
id_document = result[0]
self.assertFormPagesHasValues(id_document.pages)
for field in id_document.fields.values():
if field.name == "CountryRegion":
assert field.value == "USA"
continue
elif field.name == "Region":
assert field.value == "Washington"
else:
self.assertFieldElementsHasValues(field.value_data.field_elements, id_document.page_range.first_page_number)
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
async def test_identity_document_continuation_token(self, **kwargs):
client = kwargs.pop("client")
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
async with client:
initial_poller = await client.begin_recognize_identity_documents(id_document)
cont_token = initial_poller.continuation_token()
poller = await client.begin_recognize_identity_documents(None, continuation_token=cont_token)
result = await poller.result()
assert result is not None
await initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
@FormRecognizerPreparer()
@FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
async def test_identity_document_v2(self, **kwargs):
client = kwargs.pop("client")
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
with pytest.raises(ValueError) as e:
async with client:
await client.begin_recognize_identity_documents(id_document)
assert "Method 'begin_recognize_identity_documents' is only available for API version V2_1 and up" in str(e.value)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy_async
async def test_pages_kwarg_specified(self, client):
with open(self.identity_document_license_jpg, "rb") as fd:
id_document = fd.read()
async with client:
poller = await client.begin_recognize_identity_documents(id_document, pages=["1"])
assert '1' == poller._polling_method._initial_response.http_response.request.query['pages']
result = await poller.result()
assert result
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
e1d99bfb0a5b950802d604a203ed618117937fb1
|
55e13562203f2f24338a5e1f8bb543becf8df171
|
/lighttpd/upload.py
|
eabb90c5e76f9a50fe642e503391fc9342dd1fe7
|
[] |
no_license
|
jvanz/container-images
|
79cd1dd4ade141d733ec6923f1157c15159369ab
|
ff228722dcccb318def64d9bf485dc43ccafa0d8
|
refs/heads/master
| 2020-08-31T05:24:44.258927
| 2019-11-25T17:42:09
| 2019-11-25T17:42:09
| 218,603,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
#!/usr/bin/python3
import cgi
import cgitb
import os
cgitb.enable(display=0, logdir="/var/log/lighttpd")
form = cgi.FieldStorage()
filefield = form["file"]
msg = "Failed to upload file!"
if filefield.filename:
with open(f"/tmp/{filefield.filename}", "w+b") as f:
f.write(filefield.file.read())
msg = "File uploaded!"
else:
msg = f"Cannot find file field. {filefield.name} = {filefield.filename}"
print("Content-Type: text/html")
print()
print("<html>")
print("<body>")
print(f"<H1>{msg}</H1>")
print("</body>")
print("</html>")
|
[
"jvanz@jvanz.com"
] |
jvanz@jvanz.com
|
3a5d04a7a9e08a83b8fe983908ca0ffb84378af0
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_006/ch73_2020_06_08_22_39_00_045794.py
|
1fffb73594f4be691ffc6cda3e85a35745493c0c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
def remove_vogais(palavra):
    vogais = ["a", "e", "i", "o", "u"]
    nova = palavra
    for i in palavra:
        if i in vogais:
            nova = nova.replace(i, " ")
    return nova
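# Example (illustrative): remove_vogais("banana") returns "b n n " -- each
# vowel is replaced by a single space rather than dropped outright.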
|
[
"you@example.com"
] |
you@example.com
|
eee85f7fa13c8c9a2568ff0b2c328fcd74c447dd
|
a3eb732ead7e1d10a85a88e42dc639eb16a40265
|
/instagram_api/request/base.py
|
e326afae24f36536b07fc4d0d96d658ed435c783
|
[
"MIT"
] |
permissive
|
carsam2021/instagram_api
|
7654c0f485c22935cf478016e46e65acbeda9344
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
refs/heads/master
| 2023-03-16T14:06:27.515432
| 2020-10-17T04:39:19
| 2020-10-17T04:39:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
from typing import Dict, List
import json
from instagram_api.interfaces.api_request import ApiRequestInterface
from instagram_api.signatures import Signatures
__all__ = ['CollectionBase']
class CollectionBase:
def __init__(self, ig):
from instagram_api.instagram import Instagram
self._ig: Instagram = ig
@staticmethod
def _paginate_with_exclusion(request: ApiRequestInterface,
exclude_list: List[int],
rank_token: str,
limit: int = 30) -> ApiRequestInterface:
assert Signatures.is_valid_uuid(rank_token), f'`{rank_token}` is not a valid rank token.'
        # The original logic fell through here; return early when there is
        # nothing to exclude.
        if not exclude_list:
            return request.add_params(
                count=str(limit),
            )
return request.add_params(
count=str(limit),
exclude_list=json.dumps(exclude_list, separators=(',', ':')),
rank_token=rank_token,
)
@staticmethod
def _paginate_with_multi_exclusion(request: ApiRequestInterface,
exclude_groups: Dict[str, List[int]],
rank_token: str,
limit: int = 30) -> ApiRequestInterface:
assert Signatures.is_valid_uuid(rank_token), f'`{rank_token}` is not a valid rank token.'
        if not exclude_groups:
            return request.add_params(
                count=str(limit),
            )
total_count = 0
for ids in exclude_groups.values():
total_count += len(ids)
return request.add_params(
count=str(limit),
exclude_list=json.dumps(exclude_groups, separators=(',', ':')),
rank_token=rank_token,
)
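# Example of the compact separators used above (illustrative, not part of
# the original module):
#   json.dumps([1, 2, 3], separators=(',', ':')) == '[1,2,3]'
# i.e. no spaces after ',' or ':', which keeps the exclude_list query
# parameter as short as possible.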
|
[
"root@proscript.ru"
] |
root@proscript.ru
|
c378c2866324df13f145039f1ca8f38f447aeb85
|
be3920640bbbdb055876f2c1f49c6cc2e81cbab2
|
/pyexample/tests/test_viz.py
|
a4cf5e98a83b2a45624536877e68475621765059
|
[
"BSD-3-Clause"
] |
permissive
|
phobson/pyexample
|
4993fe8f9b70e5224d09e878f137e94c8c1b4f60
|
133598954b514a80dc0f65c02c8740b626e569d2
|
refs/heads/new-branch
| 2021-12-02T12:29:32.156032
| 2021-11-24T19:33:21
| 2021-11-24T19:33:21
| 68,402,565
| 0
| 0
|
BSD-3-Clause
| 2018-01-19T19:44:27
| 2016-09-16T17:55:31
| null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
import numpy
from matplotlib import pyplot
import pytest
from pyexample import viz
from . import helpers
BASELINE_DIR = 'baseline_images/test_viz'
TOLERANCE = 15
@pytest.fixture
def plot_data():
data = numpy.array([
3.113, 3.606, 4.046, 4.046, 4.710, 6.140, 6.978,
2.000, 4.200, 4.620, 5.570, 5.660, 5.860, 6.650,
6.780, 6.790, 7.500, 7.500, 7.500, 8.630, 8.710,
8.990, 9.850, 10.820, 11.250, 11.250, 12.200, 14.920,
16.770, 17.810, 19.160, 19.190, 19.640, 20.180, 22.970,
])
return data
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE)
@helpers.seed
def test_demo_plotting_function(plot_data):
x = numpy.random.uniform(size=len(plot_data))
fig = viz.demo_plotting_function(x, plot_data, ax=None)
assert isinstance(fig, pyplot.Figure)
return fig
|
[
"pmhobson@gmail.com"
] |
pmhobson@gmail.com
|
2fb5eec5f1b153b97c548b1814a2487ab20fc821
|
e2b9f2354c36bd1edfa141d29f60c13ea176c0fe
|
/2018/blaze/sl0thcoin/smarter/solve.py
|
a2ba36e35e7368629a46ee89ae5a80409bf445d9
|
[] |
no_license
|
Jinmo/ctfs
|
236d2c9c5a49d500e80ece4631a22c7fb32c3c3f
|
d225baef7942250a5ff15a3f2a9b7ad8501c7566
|
refs/heads/master
| 2021-07-24T15:17:05.489163
| 2021-07-05T16:05:15
| 2021-07-05T16:05:15
| 68,125,231
| 162
| 26
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
#!/usr/bin/python3
from hashlib import sha256
from itertools import permutations
import string
# Just looked EVM assembly for many hours to solve it.
hash = 'a8c8af687609bf404c202ac1378e10cd19421e72c0a161edc56b53752326592a'
# Brute-force attempt kept for reference: `hash` must be defined before this
# block, and the candidate tuple has to be encoded before hashing.
if False:
    keys = string.ascii_lowercase
    for combs in permutations(keys, 4):
        z = sha256(''.join(combs).encode()).hexdigest()
        # x = x[::-1]
        if z == hash or z == hash[::-1] or z == bytes.fromhex(hash)[::-1].hex() or z[::-1] == bytes.fromhex(hash)[::-1].hex():
            print(combs)
            exit()
keys = bytearray(bytes.fromhex("4419194e"))
for i in range(len(keys)):
    keys[i] ^= 42
prefix = b"flag{mayb3_w3_"
suffix = b"_bett3r_t00ls}"
x = bytearray(prefix + keys + suffix)
print(x)
|
[
"santoky001@naver.com"
] |
santoky001@naver.com
|
139a12db6cc98827a46acc6950671f20e7643c71
|
480e33f95eec2e471c563d4c0661784c92396368
|
/CondTools/Hcal/test/dbwriteCastorElectronicsMap_cfg.py
|
b60f9e71df8fabf598fe481066fc52793c1f219b
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420
| 2020-08-27T08:01:20
| 2020-08-27T08:01:20
| 102,867,729
| 7
| 14
|
Apache-2.0
| 2022-05-23T07:58:09
| 2017-09-08T14:03:57
|
C++
|
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.MessageLogger=cms.Service("MessageLogger",
destinations=cms.untracked.vstring("cout"),
cout=cms.untracked.PSet(
threshold=cms.untracked.string("INFO")
)
)
process.load("CondCore.DBCommon.CondDBCommon_cfi")
#process.CondDBCommon.connect = cms.string('sqlite_file:CastorEmap.db')
process.CondDBCommon.DBParameters.authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
#process.CondDBCommon.connect = cms.string('oracle://cms_orcoff_prep/CMS_COND_31X_HCAL')
process.CondDBCommon.connect = cms.string('oracle://cms_orcon_prod/CMS_COND_31X_HCAL')
process.CondDBCommon.DBParameters.authenticationPath = cms.untracked.string('/nfshome0/popcondev/conddb')
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.es_ascii = cms.ESSource("CastorTextCalibrations",
input = cms.VPSet(cms.PSet(
object = cms.string('ElectronicsMap'),
file = cms.FileInPath('CondFormats/CastorObjects/data/emap_dcc_nominal_Run121872.txt')
))
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
timetype = cms.untracked.string('runnumber'),
# logconnect= cms.untracked.string('sqlite_file:log.db'),
#logconnect= cms.untracked.string('oracle://cms_orcoff_prep/CMS_COND_31X_POPCONLOG'),
logconnect= cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('CastorElectronicsMapRcd'),
tag = cms.string('CastorElectronicsMap_v2.01_mc')
))
)
process.mytest = cms.EDAnalyzer("CastorElectronicsMapPopConAnalyzer",
record = cms.string('CastorElectronicsMapRcd'),
loggingOn= cms.untracked.bool(True),
SinceAppendMode=cms.bool(True),
Source=cms.PSet(
# firstSince=cms.untracked.double(300)
IOVRun=cms.untracked.uint32(1)
)
)
process.p = cms.Path(process.mytest)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
f6cbef75142bef6fca11beffb23c99d4b87e2dcb
|
6a95b330e1beec08b917ff45eccfd6be3fd4629f
|
/kubernetes/client/models/extensions_v1beta1_scale_status.py
|
2abdb17853310a267b8ff6d08d3f314a58e3068e
|
[
"Apache-2.0"
] |
permissive
|
TokkoLabs/client-python
|
f4a83d6540e64861b59e322c951380a670578d7f
|
f1ad9c6889105d8510472606c98f8d3807f82020
|
refs/heads/master
| 2023-07-14T01:36:46.152341
| 2017-12-21T21:32:11
| 2017-12-21T21:32:11
| 115,042,671
| 0
| 0
|
Apache-2.0
| 2021-08-06T03:29:17
| 2017-12-21T20:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 6,162
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1ScaleStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'replicas': 'int',
'selector': 'dict(str, str)',
'target_selector': 'str'
}
attribute_map = {
'replicas': 'replicas',
'selector': 'selector',
'target_selector': 'targetSelector'
}
def __init__(self, replicas=None, selector=None, target_selector=None):
"""
ExtensionsV1beta1ScaleStatus - a model defined in Swagger
"""
self._replicas = None
self._selector = None
self._target_selector = None
self.discriminator = None
self.replicas = replicas
if selector is not None:
self.selector = selector
if target_selector is not None:
self.target_selector = target_selector
@property
def replicas(self):
"""
Gets the replicas of this ExtensionsV1beta1ScaleStatus.
actual number of observed instances of the scaled object.
:return: The replicas of this ExtensionsV1beta1ScaleStatus.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this ExtensionsV1beta1ScaleStatus.
actual number of observed instances of the scaled object.
:param replicas: The replicas of this ExtensionsV1beta1ScaleStatus.
:type: int
"""
if replicas is None:
raise ValueError("Invalid value for `replicas`, must not be `None`")
self._replicas = replicas
@property
def selector(self):
"""
Gets the selector of this ExtensionsV1beta1ScaleStatus.
label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
:return: The selector of this ExtensionsV1beta1ScaleStatus.
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this ExtensionsV1beta1ScaleStatus.
label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
:param selector: The selector of this ExtensionsV1beta1ScaleStatus.
:type: dict(str, str)
"""
self._selector = selector
@property
def target_selector(self):
"""
Gets the target_selector of this ExtensionsV1beta1ScaleStatus.
        label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
:return: The target_selector of this ExtensionsV1beta1ScaleStatus.
:rtype: str
"""
return self._target_selector
@target_selector.setter
def target_selector(self, target_selector):
"""
Sets the target_selector of this ExtensionsV1beta1ScaleStatus.
        label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
:param target_selector: The target_selector of this ExtensionsV1beta1ScaleStatus.
:type: str
"""
self._target_selector = target_selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ExtensionsV1beta1ScaleStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
25d837ad1c315cf0467adfe2ff6b3178b0ee48a4
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/k8s-extension/azext_k8s_extension/tests/latest/MockClasses.py
|
8a6313c9ce63728ba80823afc1570193e5138272
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 711
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
class MockCommand:
def __init__(self):
self.cli_ctx = MockCLIContext()
class MockCLIContext:
def __init__(self):
self.cloud = MockCloud()
class MockCloud:
def __init__(self):
self.endpoints = Endpoints()
class Endpoints:
def __init__(self):
self.resource_manager = ""
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
d4904ca21f745e69b4cc262d2ecc00fba7d06012
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/A/anoukmpg/basic_twitter_scraperchavez_1.py
|
d364c9eb557ece039635cdb7b4d9fdd75b2b9d7f
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'project x haren'
RESULTS_PER_PAGE = '100'
NUM_PAGES = 1000000
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
#print result
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['created_at'] = result['created_at']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
break
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
901f3c3dd2303e61d8c714f457a19c57173a0f9b
|
0931696940fc79c4562c63db72c6cabfcb20884d
|
/Functions/Loading_Bar.py
|
6924207e641d4053b76d424b3c26fb97beb5c9b1
|
[] |
no_license
|
ivklisurova/SoftUni_Fundamentals_module
|
f847b9de9955c8c5bcc057bb38d57162addd6ad8
|
69242f94977c72005f04da78243a5113e79d6c33
|
refs/heads/master
| 2021-12-01T01:56:22.067928
| 2021-11-08T17:07:31
| 2021-11-08T17:07:31
| 253,281,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
number = int(input())
def loading(num):
loading_bar = []
x = num // 10
y = ((100 - num) // 10)
z = '[' + x * '%' + y * '.' + ']'
loading_bar.append(z)
    if (num // 10) == 10:
loading_bar.insert(0, '100% Complete!')
print(loading_bar[0])
print(loading_bar[1])
else:
loading_bar.insert(0, f'{num}%')
loading_bar.append('Still loading...')
        print(' '.join(loading_bar[0:2]))
print(loading_bar[2])
return loading_bar
loading(number)
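# Illustrative run (not part of the original exercise): for input 30 the
# function prints
#   30% [%%%.......]
#   Still loading...
# and for input 100 it prints the '100% Complete!' line followed by the
# fully filled bar.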
|
[
"55747390+ivklisurova@users.noreply.github.com"
] |
55747390+ivklisurova@users.noreply.github.com
|
3555ae78bbf5408ef0a69abe52dedbabad3a0cdf
|
57ae5bfbb24ba5fec90c9b3ecf1e15f06f1546ee
|
/7-5-1.py
|
c11af7c98b341df6cf8ce912c9b84b4464523aac
|
[] |
no_license
|
cjjhust/python_datastucture
|
f9104f1d5cae2df6436f56d4d3eaa212e6045943
|
a222bbe4bfc145ee11f73676c8f033f451d45f78
|
refs/heads/master
| 2022-07-03T19:53:07.066387
| 2020-05-15T08:06:21
| 2020-05-15T08:06:21
| 260,272,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 10 17:36:36 2018
@author: CJJ
"""
import random
def getRandomM(a,n,m):
    if a is None or n <= 0 or n < m:
        print "invalid parameters"
        return
    i = 0
    while i < m:
        j = random.randint(i, n-1)  # get a random index in [i, n-1]
        # move the randomly chosen element to the front of the array
tmp=a[i]
a[i]=a[j]
a[j]=tmp
i +=1
if __name__=="__main__":
a= [1, 2, 3, 4, 5, 6, 7, 8, 9,10 ]
n = 10
m = 6
getRandomM(a, n, m)
i=0
while i<m:
print a[i],
i +=1
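# Note (illustrative): the loop in getRandomM is the first m steps of a
# Fisher-Yates shuffle, so after it runs a[0:m] holds a uniformly random
# m-element subset of the original array, in random order.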
|
[
"44698055@qq.com"
] |
44698055@qq.com
|
d8c1bee039b5b2874724ca9a9ee2b2dd01e43952
|
7da433fc52a167a1e9b593eda1d1ee9eee03ccf1
|
/2019-prog-labooefeningen-forrestjan/week8/week08_test_bestanden_klassen/test_spelers.py
|
d18fe5544310a4f2dedbacc858045acbeab0d460
|
[] |
no_license
|
forrestjan/Labos-MCT-19-20
|
69cb26d2f1584a54db32750037dcb900a65a0ae6
|
c9392cf0cbd9ad6e5974140060b9d0beaf0a202f
|
refs/heads/main
| 2022-12-30T14:49:35.461246
| 2020-10-14T11:59:16
| 2020-10-14T11:59:16
| 304,000,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
from model.Speler import Speler
from model.Geboortedatum import Geboortedatum
def test_oef1():
    # accessing a public static variable (attribute)
Speler.naam_ploeg = "Rode duivels"
sp1 = Speler("Thibault", "Cortous", "keeper", 8, 0)
sp2 = Speler("Vincent", "Kompany", "aanvaller", 8, 3)
    # parameters 4 and 5 are not supplied; see the default parameters in the class __init__()
sp3 = Speler("Axel", "Witsel", "aanvaller")
print(sp1)
print(sp2)
print(sp3)
print("\nVincent scoort!")
sp2.maak_doelpunt()
print(sp2)
print("\nAxel scoort!")
sp3.maak_doelpunt()
print(sp3)
print(
f"Het doelpunten saldo van { Speler.naam_ploeg } is { Speler.get_doelpunten_saldo_ploeg()}")
# test_oef1()
def test_spelers_oef3():
sp1 = Speler("Thibault", "Cortous", "keeper",
8, 0, Geboortedatum(11, 5, 1992))
sp2 = Speler("Vincent", "Kompany", "aanvaller",
8, 3, Geboortedatum(10, 4, 1986))
sp3 = Speler("Axel", "Witsel", "aanvaller")
print("\nDe geboortedata van de spelers zijn:")
for speler in [sp1, sp2, sp3]:
print(f"{speler} -> gebootedatum: {speler.geboortedatum}")
test_spelers_oef3()
|
[
"jan.forrest@student.howest.be"
] |
jan.forrest@student.howest.be
|
69ef8c6d7beb126718ee82bda6f72258a06cd3ad
|
32226e72c8cbaa734b2bdee081c2a2d4d0322702
|
/railrl/data_management/env_replay_buffer.py
|
781568e5f90fa8db7cff99c76ab6480aa1591e56
|
[
"MIT"
] |
permissive
|
Asap7772/rail-rl-franka-eval
|
2b1cbad7adae958b3b53930a837df8a31ab885dc
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
refs/heads/master
| 2022-11-15T07:08:33.416025
| 2020-07-12T22:05:32
| 2020-07-12T22:05:32
| 279,155,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,705
|
py
|
from gym.spaces import Discrete
from railrl.data_management.simple_replay_buffer import SimpleReplayBuffer
from railrl.envs.env_utils import get_dim
import numpy as np
class EnvReplayBuffer(SimpleReplayBuffer):
def __init__(
self,
max_replay_buffer_size,
env,
env_info_sizes=None
):
"""
:param max_replay_buffer_size:
:param env:
"""
self.env = env
self._ob_space = env.observation_space
self._action_space = env.action_space
if env_info_sizes is None:
if hasattr(env, 'info_sizes'):
env_info_sizes = env.info_sizes
else:
env_info_sizes = dict()
super().__init__(
max_replay_buffer_size=max_replay_buffer_size,
observation_dim=get_dim(self._ob_space),
action_dim=get_dim(self._action_space),
env_info_sizes=env_info_sizes
)
def add_sample(self, observation, action, reward, terminal,
next_observation, **kwargs):
if isinstance(self._action_space, Discrete):
new_action = np.zeros(self._action_dim)
new_action[action] = 1
else:
new_action = action
return super().add_sample(
observation=observation,
action=new_action,
reward=reward,
next_observation=next_observation,
terminal=terminal,
**kwargs
)
class VPGEnvReplayBuffer(EnvReplayBuffer):
def __init__(
self,
max_replay_buffer_size,
env,
discount_factor,
):
super().__init__(max_replay_buffer_size, env)
self._returns = np.zeros((max_replay_buffer_size, 1))
self.current_trajectory_rewards = np.zeros((max_replay_buffer_size, 1))
self._max_replay_buffer_size = max_replay_buffer_size
self.discount_factor = discount_factor
self._bottom = 0
def terminate_episode(self):
returns = []
return_so_far = 0
        # index rewards from the episode start (self._bottom), not from 0
        for t in range(self._top - 1, self._bottom - 1, -1):
            return_so_far = self._rewards[t][0] + self.discount_factor * return_so_far
returns.append(return_so_far)
returns = returns[::-1]
returns = np.reshape(np.array(returns),(len(returns), 1))
self._returns[self._bottom:self._top] = returns
self._bottom = self._top
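    # Worked example (illustrative, not in the original file): with rewards
    # [1, 1, 1] in the current episode and discount_factor 0.9, the backward
    # pass above yields returns [1 + 0.9*(1 + 0.9*1), 1 + 0.9*1, 1]
    # = [2.71, 1.9, 1.0].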
def add_sample(self, observation, action, reward, terminal,
next_observation, **kwargs):
if self._top == self._max_replay_buffer_size:
raise EnvironmentError('Replay Buffer Overflow, please reduce the number of samples added!')
super().add_sample(observation, action, reward, terminal, next_observation, **kwargs)
def get_training_data(self):
batch= dict(
observations=self._observations[0:self._top],
actions=self._actions[0:self._top],
rewards=self._rewards[0:self._top],
terminals=self._terminals[0:self._top],
next_observations=self._next_obs[0:self._top],
returns = self._returns[0:self._top],
)
return batch
def empty_buffer(self):
self._observations = np.zeros(self._observations.shape)
self._next_obs = np.zeros(self._next_obs.shape)
self._actions = np.zeros(self._actions.shape)
self._rewards = np.zeros(self._rewards.shape)
self._terminals = np.zeros(self._terminals.shape, dtype='uint8')
self._returns = np.zeros(self._returns.shape)
self._size = 0
self._top = 0
self._bottom = 0
|
[
"asap7772@berkeley.edu"
] |
asap7772@berkeley.edu
|
ee47403e642fefd6b1e6f6c62477a24a6e0ce22c
|
dfdb672bbe3b45175806928d7688a5924fc45fee
|
/Learn Python the Hard Way Exercises/ex41.py
|
3abb331053e8c7304b85e45517c62a437d126264
|
[] |
no_license
|
mathans1695/Python-Practice
|
bd567b5210a4d9bcd830607627293d64b4baa909
|
3a8fabf14bc65b8fe973488503f12fac224a44ed
|
refs/heads/master
| 2023-01-01T13:49:05.789809
| 2020-10-26T02:37:05
| 2020-10-26T02:37:05
| 306,300,672
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
import random
from urllib import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self, @@@)":
"class %%% has-a function named *** that takes self and @@@ parameters.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, and call it with parameters self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(word.strip())
def convert(snippet, phrase):
    class_names = [w.capitalize() for w in random.sample(WORDS, snippet.count("%%%"))]
    other_names = random.sample(WORDS, snippet.count("***"))
    results = []
    param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
# fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
# fake other names
for word in other_names:
result = result.replace("***", word, 1)
# fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
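# Example (illustrative): for the snippet "*** = %%%()", convert() might
# return ["corn = Pea()", "Set corn to an instance of class Pea."],
# depending on which words were randomly sampled.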
# keep going until they hit CTRL- D
try:
while True:
snippets = PHRASES.keys()
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
|
[
"mathans1695@gmail.com"
] |
mathans1695@gmail.com
|
a9af131a9d4b68580fd5afc8d61703d0b57ce1d8
|
e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
|
/a10sdk/core/A10_import/import_periodic_ssl_cert.py
|
1a4f81061a75db8ac18bba2be452cb33eb364dc8
|
[
"Apache-2.0"
] |
permissive
|
amwelch/a10sdk-python
|
4179565afdc76cdec3601c2715a79479b3225aef
|
3e6d88c65bd1a2bf63917d14be58d782e06814e6
|
refs/heads/master
| 2021-01-20T23:17:07.270210
| 2015-08-13T17:53:23
| 2015-08-13T17:53:23
| 40,673,499
| 0
| 0
| null | 2015-08-13T17:51:35
| 2015-08-13T17:51:34
| null |
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
from a10sdk.common.A10BaseClass import A10BaseClass
class SslCert(A10BaseClass):
"""Class Description::
SSL Cert File(enter bulk when import an archive file).
Class ssl-cert supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param pfx_password: {"description": "The password for certificate file (pfx type only)", "format": "string", "minLength": 1, "optional": true, "maxLength": 128, "type": "string"}
:param csr_generate: {"default": 0, "optional": true, "type": "number", "description": "Generate CSR file", "format": "flag"}
:param remote_file: {"optional": true, "type": "string", "description": "profile name for remote url", "format": "url"}
:param use_mgmt_port: {"default": 0, "optional": true, "type": "number", "description": "Use management port as source port", "format": "flag"}
:param period: {"description": "Specify the period in second", "format": "number", "type": "number", "maximum": 31536000, "minimum": 60, "optional": true}
:param certificate_type: {"optional": true, "enum": ["pem", "der", "pfx", "p7b"], "type": "string", "description": "'pem': pem; 'der': der; 'pfx': pfx; 'p7b': p7b; ", "format": "enum"}
:param ssl_cert: {"description": "SSL Cert File(enter bulk when import an archive file)", "format": "string", "minLength": 1, "optional": false, "maxLength": 255, "type": "string"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/import-periodic/ssl-cert/{ssl_cert}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "ssl_cert"]
self.b_key = "ssl-cert"
self.a10_url="/axapi/v3/import-periodic/ssl-cert/{ssl_cert}"
self.DeviceProxy = ""
self.pfx_password = ""
self.csr_generate = ""
self.remote_file = ""
self.use_mgmt_port = ""
self.period = ""
self.certificate_type = ""
self.ssl_cert = ""
self.uuid = ""
        for keys, value in kwargs.items():
            setattr(self, keys, value)
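# Minimal usage sketch (illustrative; the DeviceProxy wiring is an
# assumption based on the docstring's reference to common/device_proxy.py):
#   cert = SslCert(ssl_cert="bulk", certificate_type="pem", period=3600)
#   cert.a10_url  # '/axapi/v3/import-periodic/ssl-cert/{ssl_cert}'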
|
[
"doug@parksidesoftware.com"
] |
doug@parksidesoftware.com
|
3505d9a921cb2f8e0d01d19c363eb3e875fa5f8c
|
1a83ce28cf596558bd0d8280086e27bc48d0a500
|
/src/command_modules/azure-cli-iotcentral/setup.py
|
c771f17dc15992b5221cf74250e4dc4c4ef6371c
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
willerLiu/azure-cli
|
70d8c5fa4e00c0dd3fb3bc90434d613346a03bd2
|
628ba933b954d41ad42f5c938b0f2cac55f94be2
|
refs/heads/master
| 2020-04-10T15:08:55.939172
| 2018-11-29T21:32:10
| 2018-12-04T22:09:38
| 161,098,793
| 0
| 0
|
NOASSERTION
| 2018-12-10T01:18:57
| 2018-12-10T01:18:56
| null |
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "0.1.4"
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
DEPENDENCIES = [
'azure-cli-core',
'azure-mgmt-iotcentral==1.0.0'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-iotcentral',
version=VERSION,
description='Microsoft Azure Command-Line Tools IoT Central Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
classifiers=CLASSIFIERS,
packages=[
'azure',
'azure.cli',
'azure.cli.command_modules',
'azure.cli.command_modules.iotcentral'
],
install_requires=DEPENDENCIES,
cmdclass=cmdclass
)
|
[
"tjprescott@users.noreply.github.com"
] |
tjprescott@users.noreply.github.com
|
30baca899c2b35d673ab60b7bd8c885b3d9cb6e7
|
94dde46196ec93704367d4b3dae3a8ec700e2fd7
|
/examples/button.py
|
c475fae2f499f877cdb74c51fe1181576328600e
|
[
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
] |
permissive
|
saghul/python-asiri
|
5200697ddb99471ff7daba415351c23430de9791
|
1060fa6805fe52f348fe1b33ebcfa5814b0fec26
|
refs/heads/master
| 2021-01-01T18:47:44.947924
| 2013-10-28T22:39:55
| 2013-10-28T22:39:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
from __future__ import print_function
from asiri import GPIO
from time import sleep
BTN = 9
gpio = GPIO(num_gpios=16)
gpio.setup(BTN, GPIO.IN)
try:
prev_input = 0
while True:
input = gpio.input(BTN)
if not prev_input and input:
print("Button pressed!")
prev_input = input
sleep(0.05)
except KeyboardInterrupt:
pass
finally:
gpio.cleanup()
|
[
"saghul@gmail.com"
] |
saghul@gmail.com
|
9d43360e38253f6483d86bd55776635339f42e08
|
8707f9244fcb0f34901a9ff79683c4f0fe883d20
|
/neo/test/iotest/test_spikeglxio.py
|
b77ec7accc6cfe3393c66907495c3963ad470306
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
teogale/python-neo
|
cd9b93d1fdbf99848684fbdedd46ced0dabd5feb
|
cd4226ddcfbace080c4734f562f706423979f2dc
|
refs/heads/master
| 2021-06-21T20:38:06.781570
| 2021-05-21T16:28:46
| 2021-05-21T16:28:46
| 195,394,638
| 0
| 0
|
BSD-3-Clause
| 2020-07-20T11:45:16
| 2019-07-05T10:59:36
|
Python
|
UTF-8
|
Python
| false
| false
| 388
|
py
|
"""
Tests of neo.io.spikeglxio
"""
import unittest
from neo.io import SpikeGLXIO
from neo.test.iotest.common_io_test import BaseTestIO
class TestSpikeGLXIO(BaseTestIO, unittest.TestCase):
ioclass = SpikeGLXIO
entities_to_download = [
'spikeglx'
]
entities_to_test = [
'spikeglx/Noise4Sam_g0'
]
if __name__ == "__main__":
unittest.main()
|
[
"sam.garcia.die@gmail.com"
] |
sam.garcia.die@gmail.com
|
155f07aadc216c4e1e5c876307b9a82734fe9ed3
|
0add7953d3e3ce2df9e8265102be39b758579753
|
/built-in/TensorFlow/Research/reinforcement-learning/DQN_for_TensorFlow/rl/xt/model/dqn/rainbow_network_cnn.py
|
9406c8f1e66f4627c29bc2808b78f94f19b43391
|
[
"Apache-2.0"
] |
permissive
|
Huawei-Ascend/modelzoo
|
ae161c0b4e581f8b62c77251e9204d958c4cf6c4
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
refs/heads/master
| 2023-04-08T08:17:40.058206
| 2020-12-07T08:04:57
| 2020-12-07T08:04:57
| 319,219,518
| 1
| 1
|
Apache-2.0
| 2023-03-24T22:22:00
| 2020-12-07T06:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,107
|
py
|
"""
@Author: Jack Qian
@license : Copyright(C), Huawei
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from xt.model.tf_compat import Dense, Input, Conv2D, \
Model, Adam, Lambda, Flatten, K
from xt.model.dqn.default_config import LR
from xt.model import XTModel
from xt.util.common import import_config
from xt.framework.register import Registers
@Registers.model.register
class RainbowNetworkCnn(XTModel):
"""docstring for ."""
def __init__(self, model_info):
model_config = model_info.get('model_config', None)
import_config(globals(), model_config)
self.state_dim = model_info['state_dim']
self.action_dim = model_info['action_dim']
self.learning_rate = LR
self.atoms = 51
super(RainbowNetworkCnn, self).__init__(model_info)
def create_model(self, model_info):
"""create keras model"""
state = Input(shape=self.state_dim, name='state_input')
action = Input(shape=(2, ), name='action', dtype='int32')
target_p = Input(shape=(self.atoms, ), name="target_p")
convlayer = Conv2D(32, (8, 8), strides=(4, 4), activation='relu', padding='same')(state)
convlayer = Conv2D(64, (4, 4), strides=(2, 2), activation='relu', padding='same')(convlayer)
convlayer = Conv2D(64, (3, 3), strides=(1, 1), activation='relu', padding='same')(convlayer)
flattenlayer = Flatten()(convlayer)
denselayer = Dense(512, activation='relu')(flattenlayer)
value = Dense(1, activation=None)(denselayer)
denselayer = Dense(512, activation='relu')(flattenlayer)
atom = Dense(self.action_dim * self.atoms, activation=None)(denselayer)
mean = Lambda(lambda x: tf.subtract(
tf.reshape(x, [-1, self.action_dim, self.atoms]),
tf.reduce_mean(tf.reshape(x, [-1, self.action_dim, self.atoms]), axis=1, keep_dims=True)))(atom)
value = Lambda(lambda x: tf.add(tf.expand_dims(x[0], 1), x[1]))([value, mean])
        # pylint flagged the Lambda wrapper below as unnecessary, so the
        # softmax is applied directly:
        # prob = Lambda(lambda x: tf.nn.softmax(x), name="output")(value)
        prob = tf.nn.softmax(value, name="output")
model = Model(inputs=[state, action, target_p], outputs=prob)
adam = Adam(lr=self.learning_rate, clipnorm=10.)
model.compile(loss=[dist_dqn_loss(action=action, target_p=target_p)], optimizer=adam)
return model
def train(self, state, label):
with self.graph.as_default():
K.set_session(self.sess)
# print(type(state[2][0][0]))
loss = self.model.fit(x={
'state_input': state[0],
'action': state[1],
'target_p': state[2]
},
y={"output": label},
verbose=0)
return loss
def dist_dqn_loss(action, target_p):
"""loss for rainbow"""
def loss(y_true, y_pred):
y_pred = tf.gather_nd(y_pred, action)
return -K.mean(target_p * K.log((y_pred + 1e-10)))
return loss
|
[
"1571856591@qq.com"
] |
1571856591@qq.com
|
7f1bf9f84ba0a732878a0df6d12969923da3f860
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03417/s820557335.py
|
781e7a91ed400cdd9fa97625e372b4993251ae0e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
N, M = map(int, input().split())
N, M = min(N,M), max(N,M)
ans = [[1], [2, 0], [3, 0, 9], [2, 0, 6, 4]]
if M <= 4:
print(ans[M-1][N-1])
exit()
if N == 1:
print(M-2)
exit()
if N == 2:
print(0)
exit()
if N == 3:
print(N*M - 6)
exit()
print(4 + (N-4 + M-4)*2 + (N-4)*(M-4))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d9c2578c85a6972b534c9c7bb4f968d53c437283
|
c3c31ce9a8822ac0352475934f5b3fbdacac62a1
|
/ssseg/cfgs/setr/cfgs_ade20k_vitlargepup.py
|
f48eba513dac55967b0f6bcdfe89c5ae16221369
|
[
"MIT"
] |
permissive
|
guofenggitlearning/sssegmentation
|
1e51b5b14bff3b5ad0d469ac98d711adb79cef11
|
7a405b1a4949606deae067223ebd68cceec6b225
|
refs/heads/main
| 2023-08-28T03:26:39.204259
| 2021-11-03T00:51:30
| 2021-11-03T00:51:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
'''define the config file for ade20k and ViT-Large'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
{
'num_classes': 150,
'backbone': {
'type': 'jx_vit_large_p16_384',
'series': 'vit',
'img_size': (512, 512),
'drop_rate': 0.,
'out_indices': (9, 14, 19, 23),
'norm_cfg': {'type': 'layernorm', 'opts': {'eps': 1e-6}},
'pretrained': True,
'selected_indices': (0, 1, 2, 3),
},
'auxiliary': [
{'in_channels': 1024, 'out_channels': 256, 'dropout': 0, 'num_convs': 2, 'scale_factor': 4, 'kernel_size': 3},
{'in_channels': 1024, 'out_channels': 256, 'dropout': 0, 'num_convs': 2, 'scale_factor': 4, 'kernel_size': 3},
{'in_channels': 1024, 'out_channels': 256, 'dropout': 0, 'num_convs': 2, 'scale_factor': 4, 'kernel_size': 3},
],
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'setrpup_vitlarge_ade20k_train',
'logfilepath': 'setrpup_vitlarge_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'setrpup_vitlarge_ade20k_test',
'logfilepath': 'setrpup_vitlarge_ade20k_test/test.log',
'resultsavepath': 'setrpup_vitlarge_ade20k_test/setrpup_vitlarge_ade20k_results.pkl'
}
)
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
2591de4f548ec359e4c3fca9f0f9399fabf88dfb
|
2e157761ea124b5cdbadbad61daded246deaafb2
|
/wagtail/contrib/wagtailsearchpromotions/migrations/0001_initial.py
|
62809da8def86144e08439d9c142bfec5ecd05d5
|
[
"BSD-3-Clause"
] |
permissive
|
wgiddens/wagtail
|
043e6b110229cd29d64a22860085355d38f66e03
|
4371368854a99ef754c3332ab10675ba62e614a6
|
refs/heads/master
| 2021-05-23T03:20:13.455601
| 2015-08-31T19:45:06
| 2015-08-31T19:45:06
| 41,701,494
| 0
| 0
|
NOASSERTION
| 2020-11-16T13:48:19
| 2015-08-31T21:27:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,750
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0015_add_more_verbose_names'),
('wagtailsearch', '0003_remove_editors_pick'),
]
operations = [
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.CreateModel(
name='EditorsPick',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('description', models.TextField(verbose_name='Description', blank=True)),
('page', models.ForeignKey(verbose_name='Page', to='wagtailcore.Page')),
('query', models.ForeignKey(to='wagtailsearch.Query', related_name='editors_picks')),
],
options={
'db_table': 'wagtailsearch_editorspick',
'verbose_name': "Editor's Pick",
'ordering': ('sort_order',),
},
),
],
database_operations=[]
),
migrations.AlterModelTable(
name='editorspick',
table=None,
),
migrations.RenameModel(
old_name='EditorsPick',
new_name='SearchPromotion'
),
migrations.AlterModelOptions(
name='searchpromotion',
options={'ordering': ('sort_order',), 'verbose_name': 'Search promotion'},
),
]
|
[
"karlhobley10@gmail.com"
] |
karlhobley10@gmail.com
|
a6e7ad120c45a540a1287acfb626fbd4a3de82fb
|
ab9ab9e30b8c50273d2e01e3a497b8fd1a8e8841
|
/Democode/evolution-strategies-starter-master/es_distributed/main.py
|
483d22a5b846be797fd5c18a22e9c0406fed474f
|
[
"MIT"
] |
permissive
|
Asurada2015/Multi-objective-evolution-strategy
|
32a88abe584beae24cc96a020ff79659176f1916
|
62f85e9fd23c9f6a3344855614a74e988bf3edd3
|
refs/heads/master
| 2023-02-03T04:19:28.394586
| 2020-04-14T12:25:06
| 2020-04-14T12:25:06
| 154,918,864
| 3
| 1
|
MIT
| 2023-02-02T03:23:23
| 2018-10-27T03:03:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
import errno
import json
import logging
import os
import sys
import click
from .dist import RelayClient
from .es import run_master, run_worker, SharedNoiseTable
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
@click.group()
def cli():
logging.basicConfig(
format='[%(asctime)s pid=%(process)d] %(message)s',
level=logging.INFO,
stream=sys.stderr)
@cli.command()
@click.option('--exp_str')
@click.option('--exp_file')
@click.option('--master_socket_path', required=True)
@click.option('--log_dir')
def master(exp_str, exp_file, master_socket_path, log_dir):
# Start the master
assert (exp_str is None) != (exp_file is None), 'Must provide exp_str xor exp_file to the master'
if exp_str:
exp = json.loads(exp_str)
elif exp_file:
with open(exp_file, 'r') as f:
exp = json.loads(f.read())
else:
assert False
log_dir = os.path.expanduser(log_dir) if log_dir else '/tmp/es_master_{}'.format(os.getpid())
mkdir_p(log_dir)
run_master({'unix_socket_path': master_socket_path}, log_dir, exp)
@cli.command()
@click.option('--master_host', required=True)
@click.option('--master_port', default=6379, type=int)
@click.option('--relay_socket_path', required=True)
@click.option('--num_workers', type=int, default=0)
def workers(master_host, master_port, relay_socket_path, num_workers):
# Start the relay
master_redis_cfg = {'host': master_host, 'port': master_port}
relay_redis_cfg = {'unix_socket_path': relay_socket_path}
if os.fork() == 0:
RelayClient(master_redis_cfg, relay_redis_cfg).run()
return
# Start the workers
noise = SharedNoiseTable() # Workers share the same noise
num_workers = num_workers if num_workers else os.cpu_count()
logging.info('Spawning {} workers'.format(num_workers))
for _ in range(num_workers):
if os.fork() == 0:
run_worker(relay_redis_cfg, noise=noise)
return
os.wait()
if __name__ == '__main__':
cli()
|
[
"1786546913@qq.com"
] |
1786546913@qq.com
|
ee19405107fff01363a5004a6f245590587e3446
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4424/codes/1674_1101.py
|
ca6bd59c9146923fe8a25836f1b91360fddcbc3d
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
# Test your code as you go.
# Don't test everything only at the end; that makes errors harder to pinpoint.
# Use the error messages to fix your code.
c = float(input("consumo em kWh: "))
t = input("residencias(r)/industrias(i)/comercios(c): ")
print("Entradas:", c, "kWh e tipo", t)
y = "Dados invalidos"
if ((t.lower()!= "r") and (t.lower()!= "i") and (t.lower()!= "c")) or (c<0):
print(y)
elif (c <= 500) and (t.lower()=="r"):
a = c*0.44
print('Valor total: R$ ', round(a, 2))
elif (c > 500) and (t.lower()== "r"):
a = c*0.65
print('Valor total: R$ ', round(a, 2))
elif (c<=1000) and (t.lower()== "c"):
a = c*0.55
print('Valor total: R$ ', round(a, 2))
elif (c>1000) and (t.lower()== "c"):
a = c*0.60
print('Valor total: R$ ', round(a, 2))
elif (c<=5000) and (t.lower()== "i"):
a = c*0.55
print('Valor total: R$ ', round(a, 2))
elif (c>5000) and (t.lower()== "i"):
a = c*0.60
print('Valor total: R$ ', round(a, 2))
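# Illustrative run (values assumed): entering 600 and "r" takes the
# c > 500 residential branch, so the final line printed is:
#   Valor total: R$  390.0        (600 * 0.65)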
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
10fbdc05794edb4e95044f0031de8333a7f73a81
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_peeped.py
|
a186557aa21742047c7e1738b7c7afc26028a481
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#calss header
class _PEEPED():
def __init__(self,):
self.name = "PEEPED"
		self.definitions = ['peep']  # was the bare name `peep`, which is undefined (NameError)
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['peep']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
a68501fe0fffd3341fe23f8561c763687c4ae485
|
efc9b70544c0bc108aaec0ed6a2aefdf208fd266
|
/393. UTF-8 Validation.py
|
cbd33de9cd3519f3032cff678b73861abd68bab3
|
[] |
no_license
|
fxy1018/Leetcode
|
75fad14701703d6a6a36dd52c338ca56c5fa9eff
|
604efd2c53c369fb262f42f7f7f31997ea4d029b
|
refs/heads/master
| 2022-12-22T23:42:17.412776
| 2022-12-15T21:27:37
| 2022-12-15T21:27:37
| 78,082,899
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
'''
393. UTF-8 Validation
'''
class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
count = 0
for d in data:
if count == 0: #first byte
if bin(d>>5) == "0b110":
count = 1
elif bin(d>>4) == "0b1110":
count = 2
elif bin(d>>3) == "0b11110":
count = 3
elif bin(d>>7) != "0b0":
return(False)
else:
if bin(d>>6) != "0b10":
return(False)
count -=1
return(count == 0)
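# Illustrative usage (byte sequences assumed, values traced by hand):
# sol = Solution()
# print(sol.validUtf8([197, 130, 1]))  # True  (110xxxxx 10xxxxxx, then ASCII)
# print(sol.validUtf8([235, 140, 4]))  # False (a 10xxxxxx byte was expected after 1110xxxx)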
|
[
"noreply@github.com"
] |
fxy1018.noreply@github.com
|
e95b9c3d1cd819f037b70f66076236dc02ccf425
|
73346545e69194dc1cfd887314afe600076ff263
|
/polling_stations/apps/data_collection/management/commands/import_greenwich.py
|
385a8fd726114f0e6f8492cc5e4ce64a3ce77aec
|
[] |
permissive
|
chris48s/UK-Polling-Stations
|
c7a91f80c1ea423156ac75d88dfca31ca57473ff
|
4742b527dae94f0276d35c80460837be743b7d17
|
refs/heads/master
| 2021-08-27T18:26:07.155592
| 2017-11-29T15:57:23
| 2017-11-29T15:57:23
| 50,743,117
| 1
| 0
|
BSD-3-Clause
| 2017-11-29T16:03:45
| 2016-01-30T20:20:50
|
Python
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
council_id = 'E09000011'
addresses_name = 'parl.2017-06-08/Version 1/polling_station_export-2017-05-09 2.csv'
stations_name = 'parl.2017-06-08/Version 1/polling_station_export-2017-05-09 2.csv'
elections = ['parl.2017-06-08']
csv_encoding = 'windows-1252'
|
[
"chris.shaw480@gmail.com"
] |
chris.shaw480@gmail.com
|
8afa870d5cbd78906a2398e74afc2fcfdc6b3ccb
|
df264c442075e04bb09d82f9be1d915c070d7e09
|
/SAMSUNG/SWEA/PROBLEM/2105_디저트카페_20210419.py
|
04f8efd218eb8d69a5537a62f880f1ea920a3d47
|
[] |
no_license
|
Koozzi/Algorithms
|
ff7a73726f18e87cab9406e7b71cd5d1856df183
|
38048ac0774fcab3537bdd64f48cae7d9eb71e6f
|
refs/heads/master
| 2021-07-06T04:59:38.564772
| 2021-05-03T08:49:56
| 2021-05-03T08:49:56
| 231,848,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
def change_direction(X, Y, x, y, d1, d2):
if (X == x + d1 and Y == y - d1) \
or (X == x + d1 + d2 and Y == y - d1 + d2) \
or (X == x + d2 and Y == y + d2):
return True
return False
def solve(x, y, d1, d2):
desert = set([board[x][y]])
move = [[1, -1], [1, 1], [-1, 1], [-1, -1]]
X, Y, D = x, y, 0
while True:
if change_direction(X, Y, x, y, d1, d2):
D += 1
X += move[D][0]
Y += move[D][1]
if X == x and Y == y:
break
if board[X][Y] not in desert:
desert.add(board[X][Y])
elif board[X][Y] in desert:
return -2
return len(desert)
T = int(input())
for t in range(1, T + 1):
N = int(input())
board = [list(map(int, input().split())) for _ in range(N)]
answer = -1
for x in range(N - 2):
for y in range(N - 1):
for d1 in range(1, N):
for d2 in range(1, N):
if 0 <= y - d1 and x + d1 + d2 <= N - 1 and y + d2 <= N - 1:
answer = max(answer, solve(x, y, d1, d2))
print("#{} {}".format(t, answer))
"""
2
4
9 8 9 8
4 6 9 4
8 7 7 8
4 5 3 5
5
8 2 9 6 6
1 9 3 3 4
8 2 3 3 6
4 3 4 4 9
7 4 6 3 5
"""
|
[
"koozzi666@gmail.com"
] |
koozzi666@gmail.com
|
58eae06b2d666c5bc0e9960cc336e03c78e232e9
|
cad91ae76d2746a6c28ddda0f33a58f9d461378f
|
/TensorFlow2/Recommendation/DLRM_and_DCNv2/nn/evaluator.py
|
eeb9354ef38f8574935923ebfc03a08286e22b2e
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DeepLearningExamples
|
fe677521e7e2a16e3cb0b77e358f9aab72f8c11a
|
a5388a45f71a949639b35cc5b990bd130d2d8164
|
refs/heads/master
| 2023-08-31T20:57:08.798455
| 2023-08-23T10:09:12
| 2023-08-23T10:09:12
| 131,881,622
| 11,838
| 3,124
| null | 2023-08-28T16:57:33
| 2018-05-02T17:04:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,299
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel (tgrel@nvidia.com)
import tensorflow as tf
import time
from .nn_utils import create_inputs_dict
class Evaluator:
def __init__(self, model, timer, auc_thresholds, max_steps=None, cast_dtype=None, distributed=False):
self.model = model
self.timer = timer
self.max_steps = max_steps
self.cast_dtype = cast_dtype
self.distributed = distributed
if self.distributed:
import horovod.tensorflow as hvd
self.hvd = hvd
else:
self.hvd = None
self.auc_metric = tf.keras.metrics.AUC(num_thresholds=auc_thresholds, curve='ROC',
summation_method='interpolation', from_logits=True)
self.bce_op = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True)
def _reset(self):
self.latencies, self.all_test_losses = [], []
self.auc_metric.reset_state()
@tf.function
def update_auc_metric(self, labels, y_pred):
self.auc_metric.update_state(labels, y_pred)
@tf.function
def compute_bce_loss(self, labels, y_pred):
return self.bce_op(labels, y_pred)
def _step(self, pipe):
begin = time.time()
batch = pipe.get_next()
(numerical_features, categorical_features), labels = batch
if self.cast_dtype is not None:
numerical_features = tf.cast(numerical_features, self.cast_dtype)
inputs = create_inputs_dict(numerical_features, categorical_features)
y_pred = self.model(inputs, sigmoid=False, training=False)
end = time.time()
self.latencies.append(end - begin)
if self.distributed:
y_pred = self.hvd.allgather(y_pred)
labels = self.hvd.allgather(labels)
self.timer.step_test()
if not self.distributed or self.hvd.rank() == 0:
self.update_auc_metric(labels, y_pred)
test_loss = self.compute_bce_loss(labels, y_pred)
self.all_test_losses.append(test_loss)
def __call__(self, validation_pipeline):
self._reset()
auc, test_loss = 0, 0
pipe = iter(validation_pipeline.op())
num_steps = len(validation_pipeline)
if self.max_steps is not None and self.max_steps >= 0:
num_steps = min(num_steps, self.max_steps)
for _ in range(num_steps):
self._step(pipe)
if not self.distributed or self.hvd.rank() == 0:
auc = self.auc_metric.result().numpy().item()
test_loss = tf.reduce_mean(self.all_test_losses).numpy().item()
return auc, test_loss, self.latencies
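# Minimal usage sketch (names are illustrative; the real model, timer and
# validation pipeline objects come from the surrounding training scripts):
# evaluator = Evaluator(model, timer, auc_thresholds=8000, distributed=False)
# auc, test_loss, latencies = evaluator(validation_pipeline)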
|
[
"kkudrynski@nvidia.com"
] |
kkudrynski@nvidia.com
|
082ae1287bba556369350aaf57ecbedc6fbd0a87
|
f7c72e8adde14499f119708642e7ca1e3e7424f3
|
/network_health_service/tests/create_query_tests.py
|
523fa50216d13ffb1ad98df0c5b0b9098a793f0e
|
[
"MIT"
] |
permissive
|
pmaisel/tgnms
|
0b5d7ad90821a4e52d11490199ab1253bb31ac59
|
8ab714d29f2f817f08cad928cf54b7bf1faf0aa7
|
refs/heads/main
| 2023-08-01T10:14:28.527892
| 2021-09-24T21:28:16
| 2021-09-24T21:56:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,106
|
py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import unittest
from network_health_service.stats.fetch_stats import get_link_queries, get_node_queries
from network_health_service.stats.metrics import Metrics
class CreateQueryTests(unittest.TestCase):
def setUp(self) -> None:
self.maxDiff = None
with open("tests/metrics.json") as f:
metrics = json.load(f)
Metrics.update_metrics(
metrics, prometheus_hold_time=30, use_real_throughput=True
)
def test_get_link_queries(self) -> None:
expected_link_queries = {
"analytics_alignment_status": (
"sum_over_time(min by (linkName) "
'(analytics_alignment_status{network="network_A"} == bool 1) '
"[3599s:30s])"
),
"topology_link_is_online": (
"sum_over_time(min by (linkName) "
'(topology_link_is_online{network="network_A"}) [3599s:30s])'
),
"tx_byte": (
"quantile_over_time(0.75, sum by (linkName) "
'(tx_byte{network="network_A"}) [3599s:30s])'
),
"analytics_foliage_factor": (
"quantile_over_time(0.75, "
'abs(analytics_foliage_factor{network="network_A"}) [3599s:30s])'
),
"drs_cn_egress_routes_count": (
"quantile_over_time(0.75, max by (linkName) "
'(drs_cn_egress_routes_count{network="network_A"}) [3599s:30s])'
),
"tx_ok": (
"quantile_over_time(0.75, sum by (linkName) "
'(tx_ok{network="network_A",intervalSec="1"}) [3599s:1s])'
),
"link_avail": (
"max by (linkName) "
'(resets(link_avail{network="network_A",intervalSec="1"} [3600s]))'
),
"mcs": (
"quantile_over_time(0.25, min by (linkName) "
'(mcs{network="network_A",intervalSec="1"}) [3599s:1s])'
),
"mcs_diff": (
"quantile_over_time(0.75, "
'abs(mcs{network="network_A",intervalSec="1",linkDirection="A"} '
"- on (linkName) "
'mcs{network="network_A",intervalSec="1",linkDirection="Z"}) '
"[3599s:1s])"
),
"tx_power_diff": (
"quantile_over_time(0.75, "
'abs(tx_power{network="network_A",intervalSec="1",linkDirection="A"} '
"- on (linkName) "
'tx_power{network="network_A",intervalSec="1",linkDirection="Z"}) '
"[3599s:1s])"
),
}
link_queries = get_link_queries("network_A", 3600)
self.assertDictEqual(link_queries, expected_link_queries)
expected_node_queries = {
"analytics_cn_power_status": (
"sum_over_time("
'(analytics_cn_power_status{network="network_A"} == bool 3) '
"[3599s:30s])"
),
"topology_node_is_online": (
'sum_over_time(topology_node_is_online{network="network_A"} [3600s])'
),
"drs_default_routes_changed": (
"sum_over_time(drs_default_routes_changed"
'{network="network_A"} [3600s])'
),
"udp_pinger_loss_ratio": (
"sum_over_time("
'(udp_pinger_loss_ratio{network="network_A",intervalSec="30"} '
"< bool 0.9) [3599s:30s])"
),
"udp_pinger_rtt_avg": (
"quantile_over_time(0.75, "
'udp_pinger_rtt_avg{network="network_A",intervalSec="30"} [3600s])'
),
"min_route_mcs": (
"quantile_over_time(0.25, "
'drs_min_route_mcs{network="network_A"} [3599s:60s])'
),
}
node_queries = get_node_queries("network_A", 3600)
self.assertDictEqual(node_queries, expected_node_queries)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
ab7269496f8882045c1c28e24afc8d9c5912824f
|
75bee875a2d26ed71513f46a2acbb564dd9a1c44
|
/app/modules/users/schemas.py
|
c58686ed7b25645868c0382549201f648b8ca6d9
|
[
"MIT"
] |
permissive
|
frol/flask-restplus-server-example
|
d096aa1f4e3b6024ecb16af3d0769ccc20e7cff8
|
53a3a156cc9df414537860ed677bd0cc98dd2271
|
refs/heads/master
| 2023-08-28T14:27:34.047855
| 2023-06-21T14:30:54
| 2023-06-21T14:30:54
| 46,421,329
| 1,487
| 412
|
MIT
| 2023-06-21T14:30:55
| 2015-11-18T13:43:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
# encoding: utf-8
# pylint: disable=too-few-public-methods
"""
User schemas
------------
"""
from flask_marshmallow import base_fields
from flask_restplus_patched import Schema, ModelSchema
from .models import User
class BaseUserSchema(ModelSchema):
"""
Base user schema exposes only the most general fields.
"""
class Meta:
# pylint: disable=missing-docstring
model = User
fields = (
User.id.key,
User.username.key,
User.first_name.key,
User.middle_name.key,
User.last_name.key,
)
dump_only = (
User.id.key,
)
class DetailedUserSchema(BaseUserSchema):
"""
Detailed user schema exposes all useful fields.
"""
class Meta(BaseUserSchema.Meta):
fields = BaseUserSchema.Meta.fields + (
User.email.key,
User.created.key,
User.updated.key,
User.is_active.fget.__name__,
User.is_regular_user.fget.__name__,
User.is_admin.fget.__name__,
)
class UserSignupFormSchema(Schema):
recaptcha_server_key = base_fields.String(required=True)
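# Minimal sketch (assumed, marshmallow 2.x style used by flask_marshmallow):
# BaseUserSchema().dump(user).data      # only the general fields
# DetailedUserSchema().dump(user).data  # adds email/created/updated/role flags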
|
[
"frolvlad@gmail.com"
] |
frolvlad@gmail.com
|
163380fce342e9d5ef8fe2951cc513046cff926b
|
146db0a1ba53d15ab1a5c3dce5349907a49217c3
|
/omega_miya/plugins/nbnhhsh/utils.py
|
651e9331bf5ab0fa191f726697c602576fcbbf5c
|
[
"Python-2.0",
"MIT"
] |
permissive
|
hailong-z/nonebot2_miya
|
84d233122b2d785bfc230c4bfb29326844700deb
|
7d52ef52a0a13c5ac6519199e9146a6e3c80bdce
|
refs/heads/main
| 2023-03-26T14:59:31.107103
| 2021-03-09T17:01:08
| 2021-03-09T17:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import aiohttp
from omega_miya.utils.Omega_Base import Result
API_URL = 'https://lab.magiconch.com/api/nbnhhsh/guess/'
async def get_guess(guess: str) -> Result:
timeout_count = 0
error_info = ''
    data = {'text': guess}  # defined before the loop so except/else can always reference it
    while timeout_count < 3:
        try:
            timeout = aiohttp.ClientTimeout(total=10)
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.post(url=API_URL, data=data, timeout=timeout) as resp:
                    _json = await resp.json()
                    result = Result(error=False, info='Success', result=_json)
                    return result
        except Exception as e:
            error_info += f'{repr(e)} occurred in get_guess attempt {timeout_count + 1} using params: {data}\n'
        finally:
            timeout_count += 1
    else:
        error_info += f'Failed too many times in get_guess using params: {data}'
result = Result(error=True, info=error_info, result=[])
return result
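# Minimal sketch of driving this coroutine (query string assumed; `Result`
# exposes .error/.info/.result per omega_miya.utils.Omega_Base):
# import asyncio
# res = asyncio.get_event_loop().run_until_complete(get_guess('yyds'))
# print(res.result)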
|
[
"ailitonia@gmail.com"
] |
ailitonia@gmail.com
|
1bfb298507a115191ecbd58c3ed71f58ace4a479
|
1cbca82db8f5ab0eac5391e98c7d28ebab447be1
|
/workspace_tools/build.py
|
d57b54c819bd3986fd0617d0ec78bea458ac67bd
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
genba/mbed
|
59d8588f5ce59a0ebeb37e611aaa29903870195b
|
52e23e58c0174a595af367b0ef5cc79ef3933698
|
refs/heads/master
| 2020-12-07T05:22:24.730780
| 2013-10-16T13:35:35
| 2013-10-16T13:35:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,169
|
py
|
#! /usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
LIBRARIES BUILD
"""
import sys
from time import time
from os.path import join, abspath, dirname
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.append(ROOT)
from workspace_tools.toolchains import TOOLCHAINS
from workspace_tools.targets import TARGET_NAMES, TARGET_MAP
from workspace_tools.options import get_default_options_parser
from workspace_tools.build_api import build_mbed_libs, build_lib
if __name__ == '__main__':
start = time()
# Parse Options
parser = get_default_options_parser()
# Extra libraries
parser.add_option("-r", "--rtos", action="store_true", dest="rtos",
default=False, help="Compile the rtos")
parser.add_option("-e", "--eth", action="store_true", dest="eth",
default=False, help="Compile the ethernet library")
parser.add_option("-V", "--vodafone", action="store_true", dest="vodafone",
default=False, help="Compile the Vodafone library")
parser.add_option("-U", "--usb_host", action="store_true", dest="usb_host",
default=False, help="Compile the USB Host library")
parser.add_option("-u", "--usb", action="store_true", dest="usb",
default=False, help="Compile the USB Device library")
parser.add_option("-d", "--dsp", action="store_true", dest="dsp",
default=False, help="Compile the DSP library")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="Verbose diagnostic output")
(options, args) = parser.parse_args()
# Get target list
if options.mcu:
targets = [options.mcu]
else:
targets = TARGET_NAMES
# Get toolchains list
if options.tool:
toolchains = [options.tool]
else:
toolchains = TOOLCHAINS
# Get libraries list
libraries = []
# Additional Libraries
if options.rtos:
libraries.extend(["rtx", "rtos"])
if options.eth:
libraries.append("eth")
if options.vodafone:
libraries.append("vodafone")
if options.usb:
libraries.append("usb")
if options.usb_host:
libraries.append("usb_host")
if options.dsp:
libraries.extend(["cmsis_dsp", "dsp"])
# Build
failures = []
successes = []
for toolchain in toolchains:
for target in targets:
id = "%s::%s" % (toolchain, target)
try:
mcu = TARGET_MAP[target]
build_mbed_libs(mcu, toolchain, options=options.options,
verbose=options.verbose, clean=options.clean)
for lib_id in libraries:
build_lib(lib_id, mcu, toolchain, options=options.options,
verbose=options.verbose, clean=options.clean)
successes.append(id)
except Exception, e:
if options.verbose:
import sys, traceback
traceback.print_exc(file=sys.stdout)
sys.exit(1)
failures.append(id)
print e
# Write summary of the builds
print "\n\nCompleted in: (%.2f)s" % (time() - start)
if successes:
print "\n\nBuild successes:"
print "\n".join([" * %s" % s for s in successes])
if failures:
print "\n\nBuild failures:"
print "\n".join([" * %s" % f for f in failures])
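# Example invocation (assumed; -m/-t come from the default options parser,
# target and toolchain names are illustrative):
#   python workspace_tools/build.py -m LPC1768 -t ARM -r -e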
|
[
"emilmont@gmail.com"
] |
emilmont@gmail.com
|
5b118a1a0d810eb0ff631f8ad9b5fa40f659788f
|
6a819308924a005aa66475515bd14586b97296ae
|
/venv/lib/python3.6/site-packages/pip/utils/outdated.py
|
fae374b0aa9c5dee18ab16bf1e673e35aaf0053e
|
[] |
no_license
|
AlexandrTyurikov/my_first_Django_project
|
a2c655dc295d3904c7688b8f36439ae8229d23d1
|
1a8e4d033c0ff6b1339d78c329f8beca058b019a
|
refs/heads/master
| 2020-05-04T13:20:20.100479
| 2019-05-04T23:41:39
| 2019-05-04T23:41:39
| 179,156,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,445
|
py
|
from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._vendor import lockfile
from pip._vendor.packaging import version as packaging_version
from pip.compat import total_seconds, WINDOWS
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
from pip.utils import ensure_dir, get_installed_version
from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
class VirtualenvSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError):
self.state = {}
def save(self, pypi_version, current_time):
# Attempt to write out our version check file
with open(self.statefile_path, "w") as statefile:
json.dump(
{
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
},
statefile,
sort_keys=True,
separators=(",", ":")
)
class GlobalSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
self.state = {}
def save(self, pypi_version, current_time):
        # Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
        # Now that we've ensured the directory is owned by this user, we'll go
        # ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def load_selfcheck_statefile():
if running_under_virtualenv():
return VirtualenvSelfCheckState()
else:
return GlobalSelfCheckState()
def pip_version_check(session):
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if installed_version is None:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = load_selfcheck_statefile()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
resp = session.get(
PyPI.pip_json_url,
headers={"Accept": "application/json"},
)
resp.raise_for_status()
pypi_version = [
v for v in sorted(
list(resp.json()["releases"]),
key=packaging_version.parse,
)
if not packaging_version.parse(v).is_prerelease
][-1]
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
|
[
"tyur.sh@gmail.com"
] |
tyur.sh@gmail.com
|
5d1dab4d0840ab21108dceab9c5e541f8aacef51
|
eb38517d24bb32cd8a33206d4588c3e80f51132d
|
/pre_procanny_proy2.py
|
d9c459946676f85c4f0c72c265bf462d2fc0b269
|
[] |
no_license
|
Fernando23296/l_proy
|
2c6e209892112ceafa00c3584883880c856b6983
|
b7fdf99b9bd833ca1c957d106b2429cbd378abd3
|
refs/heads/master
| 2020-04-01T18:01:41.333302
| 2018-12-04T23:45:53
| 2018-12-04T23:45:53
| 153,466,681
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,645
|
py
|
'''
IMAGE PRE-PROCESSING PIPELINE:
- BORDER MASKING (FILLING)
- GRAYSCALE
- EROSION
- THRESHOLDING (cv2.THRESH_BINARY)
- GAUSSIAN BLUR
- CANNY
- PREWITT
- SKELETONIZE
'''
import scipy.ndimage.morphology as morp
import numpy as np
import cv2
import imutils
import matplotlib.pyplot as plt
def skeletonize(img):
struct = np.array([[[[0, 0, 0], [0, 1, 0], [1, 1, 1]],
[[1, 1, 1], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [1, 1, 0], [0, 1, 0]],
[[0, 1, 1], [0, 0, 1], [0, 0, 0]]],
[[[0, 0, 1], [0, 1, 1], [0, 0, 1]],
[[1, 0, 0], [1, 0, 0], [1, 0, 0]]],
[[[0, 0, 0], [0, 1, 1], [0, 1, 0]],
[[1, 1, 0], [1, 0, 0], [0, 0, 0]]],
[[[1, 1, 1], [0, 1, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [1, 1, 1]]],
[[[0, 1, 0], [0, 1, 1], [0, 0, 0]],
[[0, 0, 0], [1, 0, 0], [1, 1, 0]]],
[[[1, 0, 0], [1, 1, 0], [1, 0, 0]],
[[0, 0, 1], [0, 0, 1], [0, 0, 1]]],
[[[0, 1, 0], [1, 1, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 1], [0, 1, 1]]]])
img = img.copy()
last = ()
while np.any(img != last):
last = img
for s in struct:
img = np.logical_and(img, np.logical_not(
morp.binary_hit_or_miss(img, *s)))
return img
img = cv2.imread('ex4.jpg')
dimensions = img.shape
# height, width, number of channels in image
height = img.shape[0]
width = img.shape[1]
qua = int(width/10)
qua2=int(qua*3)
qua7 = int(qua*7)
img[0:height, 0:qua2] = [0]
img[0:height, qua7:width] = [0]
kernel = np.ones((5, 5), np.uint8)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
erosion = cv2.erode(gray, kernel, iterations=2)
# promising, but does not work for ex5.jpg
#thresh = cv2.threshold(gray, 180, 46, cv2.THRESH_BINARY)[1]
# promising for ex5.jpg and ex1.jpg
#thresh = cv2.threshold(gray, 150, 200, cv2.THRESH_BINARY)[1]
thresh = cv2.threshold(gray, 180, 46, cv2.THRESH_BINARY)[1]
img_gaussian = cv2.GaussianBlur(thresh, (3, 3), 0)
img = cv2.Canny(img_gaussian, 50, 200)
kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
img_prewittx = cv2.filter2D(img, -1, kernelx)
img_prewitty = cv2.filter2D(img, -1, kernely)
img = img_prewittx + img_prewitty
ret, img = cv2.threshold(img, 172, 255, 0)
skel = skeletonize(img)
cv2.imwrite('.png', skel.astype(np.uint8)*255)
#cv2.imshow("skel", skel.astype(np.uint8)*255)
#cv2.waitKey(0)
|
[
"fernando23296@gmail.com"
] |
fernando23296@gmail.com
|
e88bc1ada639aa5f6c82bbea6feb7484fde89fda
|
3388cf3dfde334e6eddc845879b48e9804d8d374
|
/src/rocks-pylib/rocks/commands/set/host/power/plugin_physical_host.py
|
5f530e9363ffdb90e1e2697b5c2e8d1504b23c79
|
[] |
no_license
|
scottsakai/core
|
16c6d83a4ee33a534ab0e0a1462680a1183c7881
|
21bced45edd9b70258fa59929f09b102f7874060
|
refs/heads/master
| 2021-08-14T07:12:42.726105
| 2017-11-14T23:57:36
| 2017-11-14T23:57:36
| 109,899,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,669
|
py
|
# $Id: plugin_physical_host.py,v 1.6 2012/11/27 00:48:29 phil Exp $
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 6.2 (SideWinder)
#
# Copyright (c) 2000 - 2014 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: plugin_physical_host.py,v $
# Revision 1.6 2012/11/27 00:48:29 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.5 2012/05/06 05:48:35 phil
# Copyright Storm for Mamba
#
# Revision 1.4 2011/07/23 02:30:38 phil
# Viper Copyright
#
# Revision 1.3 2010/09/07 23:53:01 bruno
# star power for gb
#
# Revision 1.2 2010/07/14 19:39:39 bruno
# better
#
# Revision 1.1 2010/06/22 21:42:36 bruno
# power control and console access for VMs
#
#
import rocks.commands
class Plugin(rocks.commands.Plugin):
def provides(self):
return 'physical-host'
def run(self, args):
host = args[0]
state = args[1]
rsakey = args[2]
#
# determine if this is a physical host
#
physnode = 1
rows = self.db.execute("""show tables like 'vm_nodes' """)
if rows == 1:
rows = self.db.execute("""select vn.id from
vm_nodes vn, nodes n where vn.node = n.id and
n.name = "%s" """ % (host))
if rows == 1:
physnode = 0
if physnode:
#
# write IPMI commands here
#
pass
|
[
"ppapadopoulos@ucsd.edu"
] |
ppapadopoulos@ucsd.edu
|
b96249085246a6f8048ee663380edb20dc84e461
|
452b8eed791cb6dfda9505f2b918f5eaa0331319
|
/utils.py
|
5c2b2aa9296ec954f6692d6af623611b82f4b0bb
|
[] |
no_license
|
AntLouiz/hello_world_algoritmo_genetico
|
83829b9317af04eb00437f8334a3e1ed7e7a218a
|
f97589976a01fa2235cec59888611476133b06ad
|
refs/heads/master
| 2020-04-02T16:08:57.162778
| 2018-11-01T11:55:48
| 2018-11-01T11:55:48
| 154,600,417
| 0
| 0
| null | 2018-11-01T01:55:46
| 2018-10-25T02:49:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
import sys
from random import randint
from settings import (
ALPHABET,
MASTER_SOLUTION,
INITIAL_POPULATION,
MUTATION_PERCENTUAL,
ELITISM_PERCENTUAL
)
from individual import Individual
from operator import attrgetter
def generate_individual(max_length):
gene = ''
for i in range(max_length):
gene += ALPHABET[randint(0, len(ALPHABET) - 1)]
return Individual(gene)
def tournament_selection(population):
    # Elitism: carry over the top candidates. (The original loop re-appended
    # the same best individual on every iteration and then discarded an
    # arbitrary slice of the population.)
    total_candidates = int((INITIAL_POPULATION * ELITISM_PERCENTUAL) / 100)
    ranked = sorted(population, key=attrgetter('fitness'), reverse=True)
    selected_candidates = ranked[:total_candidates]
    population = ranked[total_candidates:]
    for x in population:
        arena = [population[randint(0, len(population) - 1)] for i in range(2)]
        best = max(arena, key=attrgetter('fitness'))
        print(best)
        if best.fitness == len(MASTER_SOLUTION):
            print("Found the best solution: {}".format(best.gene))
            sys.exit()
        selected_candidates.append(best)
    return zip(selected_candidates, selected_candidates[int(len(selected_candidates) / 2):])
def crossover(first_individual, second_individual):
binary_mask = [randint(0, 1) for i in range(len(MASTER_SOLUTION))]
son = ''
for i in range(len(binary_mask)):
if binary_mask[i]:
son += first_individual.gene[i]
else:
son += second_individual.gene[i]
return Individual(son)
def mutate_population(population):
total_mutations = int((INITIAL_POPULATION * MUTATION_PERCENTUAL) / 100)
for i in range(total_mutations):
population[randint(0, len(population) - 1)].mutate()
return population
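# Minimal sketch of one generation built from these helpers (assumes the
# constants defined in settings.py; the wiring below is illustrative only):
# population = [generate_individual(len(MASTER_SOLUTION)) for _ in range(INITIAL_POPULATION)]
# pairs = tournament_selection(population)
# population = mutate_population([crossover(a, b) for a, b in pairs])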
|
[
"luizrodrigo46@hotmail.com"
] |
luizrodrigo46@hotmail.com
|
49dccb091a494ca83b829404e70ab616ba505e1e
|
4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422
|
/_0186_Reverse_Words_in_a_String_II.py
|
49b93f015ec8c38a2f2c5e1a22d1a9a0ad88e3d1
|
[] |
no_license
|
mingweihe/leetcode
|
a2cfee0e004627b817a3c0321bb9c74128f8c1a7
|
edff905f63ab95cdd40447b27a9c449c9cefec37
|
refs/heads/master
| 2021-06-19T07:46:46.897952
| 2021-05-02T05:13:17
| 2021-05-02T05:13:17
| 205,740,338
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
class Solution(object):
def reverseWords(self, s):
"""
:type s: List[str]
:rtype: None Do not return anything, modify s in-place instead.
"""
# Approach 2
def reverse(left, right):
while left < right:
s[left], s[right] = s[right], s[left]
left += 1
right -= 1
reverse(0, len(s)-1)
r = 0
while r < len(s):
l = r
while r < len(s) and s[r] != ' ': r+=1
reverse(l, r-1)
r += 1
# Approach 1
# def reverse(left, right):
# while left < right:
# s[left], s[right] = s[right], s[left]
# left += 1
# right -= 1
# reverse(0, len(s)-1)
# start = 0
# for i in xrange(len(s)):
# if s[i] == ' ':
# reverse(start, i-1)
# start = i + 1
# reverse(start, len(s)-1)
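# Illustrative in-place usage (input assumed):
# s = list("the sky is blue")
# Solution().reverseWords(s)
# print("".join(s))  # -> "blue is sky the"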
|
[
"10962421@qq.com"
] |
10962421@qq.com
|
99589cbca2c51918536b06325b561d1c22a1c1e3
|
898c5364cab1d8bf6366de22753509bd021b45f3
|
/0x01-python-if_else_loops_functions/9-print_last_digit.py
|
4c5c2ceda0b9270a97360e23db8e6dd80b604205
|
[] |
no_license
|
jozsa/holbertonschool-higher_level_programming
|
98cc071321934f221ad592a066349e0be293c865
|
7920933ecf983c1856930d4d3b707e230b589231
|
refs/heads/master
| 2020-04-09T10:02:42.163683
| 2019-05-17T02:08:41
| 2019-05-17T02:08:41
| 160,255,877
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
#!/usr/bin/python3
def print_last_digit(number):
lastdigit = abs(number) % 10
print('{:d}'.format(lastdigit), end="")
return lastdigit
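# Illustrative usage (assumed): print_last_digit(-98) prints "8" and returns 8.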
|
[
"allisonjoyweiner@gmail.com"
] |
allisonjoyweiner@gmail.com
|
48361c1f31049cb6f9ce19e96e8b7b5dc555067d
|
e5654e71ad4f043bb28105c3b6f3cd833e1c52dc
|
/openai/venv/lib64/python3.10/site-packages/langchain/evaluation/loading.py
|
613e261303bbfeeccb63eaa8ec5bd26bfb7b6afb
|
[] |
no_license
|
henrymendez/garage
|
0b795f020a68fe2d349b556fb8567f6b96488ed5
|
b7aaa920a52613e3f1f04fa5cd7568ad37302d11
|
refs/heads/master
| 2023-07-19T20:16:02.792007
| 2023-07-07T16:58:15
| 2023-07-07T16:58:15
| 67,760,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from typing import Dict, List
def load_dataset(uri: str) -> List[Dict]:
from datasets import load_dataset
dataset = load_dataset(f"LangChainDatasets/{uri}")
return [d for d in dataset["train"]]
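# Minimal sketch (assumes the HuggingFace `datasets` package is installed and
# that the name exists under LangChainDatasets; illustrative only):
# rows = load_dataset("llm-math")
# print(len(rows), rows[0].keys())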
|
[
"henry95@gmail.com"
] |
henry95@gmail.com
|
230aa57ef7719c1e419ae82bc32e2a40745cc699
|
3fba33f91e1f50077dc2cce663b7de0f70a17a51
|
/wlhub/dictionaries/admin.py
|
0947f9f15e022cd47245059e1131a9e8bd47397b
|
[] |
no_license
|
azinit/wlhub
|
59be2e9f555fa6655965d13580fd05963dc414b6
|
616761ef39f4cdb82d032f737bf50c66a9e935d1
|
refs/heads/master
| 2022-12-22T12:26:33.907642
| 2020-09-13T21:45:33
| 2020-09-13T21:45:33
| 295,242,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from django.contrib import admin
from core.mixins import ListLinksMixin
from dictionaries.models import *
# Register your models here.
@admin.register(Tag)
class TagAdmin(ListLinksMixin, admin.ModelAdmin):
list_display = ('name', 'user')
list_filter = ("user",)
@admin.register(Area)
class AreaAdmin(ListLinksMixin, admin.ModelAdmin):
list_display = ('name', 'description', 'user')
list_filter = ("user",)
@admin.register(Subject)
class SubjectAdmin(ListLinksMixin, admin.ModelAdmin):
list_display = ('name', 'area', 'description')
list_filter = ("area__user",)
|
[
"martis.azin@gmail.com"
] |
martis.azin@gmail.com
|
ede5ce348c523a93a566268ee747898144d0fef6
|
4be2c72579486ad04a00db0349028de96d2dce89
|
/scripts/fxpt/fx_refsystem/transform_handle.py
|
87a840f75acf62a388796d33fb8ad59c417214c4
|
[] |
no_license
|
italic-r/maya-prefs
|
6a617d40beee8937186b4699c5cead44e01c2d40
|
aa21e5e2938dc2698ce5f555ee74a594e08aed2b
|
refs/heads/master
| 2021-09-09T16:31:00.411349
| 2018-03-18T01:40:10
| 2018-03-18T01:40:10
| 86,961,959
| 16
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
from maya import cmds as m
from fxpt.fx_utils.utils_maya import getShape, getParent, parentAPI
# noinspection PyAttributeOutsideInit
class TransformHandle(object):
def __init__(self, transform=None, shape=None):
self.initHandle(transform, shape)
def __str__(self):
return 'transform={}, shape={}'.format(self.transform, self.shape)
def initHandle(self, transform=None, shape=None):
if (transform is not None) and (m.objExists(transform)):
self.transform = transform
self.shape = getShape(transform)
elif (shape is not None) and (m.objExists(shape)):
self.transform = getParent(shape)
self.shape = shape
else:
self.transform = None
self.shape = None
def getChildren(self, allDescendants=False, typ=None):
if typ:
return sorted(
m.listRelatives(
self.transform,
children=True,
allDescendents=allDescendants,
fullPath=True,
typ=typ
) or [])
else:
return sorted(
m.listRelatives(
self.transform,
children=True,
allDescendents=allDescendants,
fullPath=True
) or [])
def getParents(self, typ=None):
if typ:
return sorted(
m.listRelatives(
self.transform,
parent=True,
fullPath=True,
typ=typ
) or [])
else:
return sorted(
m.listRelatives(
self.transform,
parent=True,
fullPath=True
) or [])
def parent(self, newParent, absolute=True):
pass
def exists(self):
return (self.transform is not None) and (m.objExists(self.transform))
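# Minimal sketch inside a Maya session (scene contents assumed; illustrative):
# th = TransformHandle(transform='pCube1')
# print(th)                          # transform=..., shape=...
# print(th.getChildren(typ='mesh'))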
|
[
"italic.rendezvous@gmail.com"
] |
italic.rendezvous@gmail.com
|
03bd72acbee9ebd416f127863815001d43529260
|
03cd08ce32a2c1b3b8f4563d01a5e55b974f2c64
|
/57.py
|
1166a9b99985e44d26ae86fb512bd0f51268d76e
|
[] |
no_license
|
joemeens/Pro
|
46165be31673a6477fe7c9d2f90e12e75574daca
|
19ce75ede8c7694623c26a91cf7cc6c9a94b673b
|
refs/heads/master
| 2020-06-09T01:36:58.438553
| 2019-08-17T14:41:42
| 2019-08-17T14:41:42
| 193,344,489
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
# Count how many times a character appears in a string; both are read
# from a single space-separated input line.
list1, char = input().split(" ")
count = 0
for i in range(0, len(list1)):
    if list1[i] == char:
        count = count + 1
print(count)
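# Illustrative run (input assumed): entering "banana a" prints 3.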
|
[
"noreply@github.com"
] |
joemeens.noreply@github.com
|
68f1edfca6e6c27a77da50e9bed255dc4783861d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/fyyJRDHcTe9REs4Ni_24.py
|
3166469c0022b926678616faf843ff7e33b85cf2
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
"""
Create a function that takes three arguments (first dictionary, second
dictionary, key) in order to:
1. Return the boolean `True` if both dictionaries have the same values for the same keys.
2. If the dictionaries don't match, return the string `"Not the same"`, or the string `"One's empty"` if only one of the dictionaries contains the given key.
### Examples
dict_first = { "sky": "temple", "horde": "orcs", "people": 12, "story": "fine", "sun": "bright" }
dict_second = { "people": 12, "sun": "star", "book": "bad" }
check(dict_first, dict_second, "horde") ➞ "One's empty"
check(dict_first, dict_second, "people") ➞ True
check(dict_first, dict_second, "sun") ➞ "Not the same"
### Notes
* Dictionaries are an unordered data type.
* Double quotes may be helpful.
* `KeyError` can occur when trying to access a dictionary key that doesn't exist.
"""
def check(d1, d2, k):
if k in d1 and k in d2:
if d1[k]==d2[k]:
return True
else:
return "Not the same"
elif (k in d1 and k not in d2) or (k not in d1 and k in d2):
return "One's empty"
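# Examples from the prompt above:
# dict_first = {"sky": "temple", "horde": "orcs", "people": 12, "story": "fine", "sun": "bright"}
# dict_second = {"people": 12, "sun": "star", "book": "bad"}
# check(dict_first, dict_second, "horde")   # -> "One's empty"
# check(dict_first, dict_second, "people")  # -> True
# check(dict_first, dict_second, "sun")     # -> "Not the same"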
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
b498b8c9a09c4ad781755f8128413c37b8dd9431
|
124263fe47a7a990e46720ee9720dac23de53412
|
/tests/test_transforms.py
|
2feb96799180d30cfa718f5ad7b70e40b99c7bf6
|
[
"MIT"
] |
permissive
|
mahaling/pytorch-3dunet
|
17cae1176a464ffa8a42f6eaee0d7f57b8402941
|
458985ebc766acdc7599f92f54f10becbc4d4b95
|
refs/heads/master
| 2023-08-03T12:26:07.103875
| 2020-06-24T22:30:57
| 2020-06-24T22:30:57
| 206,171,755
| 0
| 1
|
MIT
| 2020-04-19T04:31:38
| 2019-09-03T21:02:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,017
|
py
|
import numpy as np
from augment.transforms import RandomLabelToAffinities, LabelToAffinities, Transformer, Relabel
class TestTransforms:
config = {'dtype': 'long'}
def test_random_label_to_boundary(self):
size = 20
label = _diagonal_label_volume(size)
transform = RandomLabelToAffinities(np.random.RandomState())
result = transform(label)
assert result.shape == (1,) + label.shape
def test_random_label_to_boundary_with_ignore(self):
size = 20
label = _diagonal_label_volume(size, init=-1)
transform = RandomLabelToAffinities(np.random.RandomState(), ignore_index=-1)
result = transform(label)
assert result.shape == (1,) + label.shape
assert -1 in np.unique(result)
def test_label_to_boundary(self):
size = 20
label = _diagonal_label_volume(size)
# this transform will produce 2 channels
transform = LabelToAffinities(offsets=(2, 4), aggregate_affinities=True)
result = transform(label)
assert result.shape == (2,) + label.shape
assert np.array_equal(np.unique(result), [0, 1])
def test_label_to_boundary_with_ignore(self):
size = 20
label = _diagonal_label_volume(size, init=-1)
transform = LabelToAffinities(offsets=(2, 4), ignore_index=-1, aggregate_affinities=True)
result = transform(label)
assert result.shape == (2,) + label.shape
assert np.array_equal(np.unique(result), [-1, 0, 1])
def test_label_to_boundary_no_aggregate(self):
size = 20
label = _diagonal_label_volume(size)
# this transform will produce 6 channels
transform = LabelToAffinities(offsets=(2, 4), aggregate_affinities=False)
result = transform(label)
assert result.shape == (6,) + label.shape
assert np.array_equal(np.unique(result), [0, 1])
def test_relabel(self):
label = np.array([[10, 10, 10], [0, 0, 0], [5, 5, 5]])
r = Relabel()
result = r(label)
assert np.array_equal(result, np.array([[2, 2, 2], [0, 0, 0], [1, 1, 1]]))
def test_BaseTransformer(self):
config = {
'raw': [{'name': 'Normalize'}, {'name': 'ToTensor', 'expand_dims': True}],
'label': [{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}],
'weight': [{'name': 'ToTensor', 'expand_dims': False}]
}
transformer = Transformer(config, 0, 1)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].expand_dims
label_transforms = transformer.label_transform().transforms
assert not label_transforms[0].expand_dims
assert label_transforms[0].dtype == 'long'
weight_transforms = transformer.weight_transform().transforms
assert not weight_transforms[0].expand_dims
def test_StandardTransformer(self):
config = {
'raw': [
{'name': 'Normalize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
transformer = Transformer(config, 0, 1)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].execution_probability == 0.5
assert raw_transforms[4].expand_dims
label_transforms = transformer.label_transform().transforms
assert len(label_transforms) == 3
def test_AnisotropicRotationTransformer(self):
config = {
'raw': [
{'name': 'Normalize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]]},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]]},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
transformer = Transformer(config, 0, 1)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].execution_probability == 0.5
assert raw_transforms[4].angle_spectrum == 17
assert raw_transforms[4].axes == [[2, 1]]
label_transforms = transformer.label_transform().transforms
assert len(label_transforms) == 4
def test_LabelToBoundaryTransformer(self):
config = {
'raw': [
{'name': 'Normalize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'LabelToAffinities', 'offsets': [2, 4, 6, 8]},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
transformer = Transformer(config, 0, 1)
raw_transforms = transformer.raw_transform().transforms
assert raw_transforms[0].mean == 0
assert raw_transforms[0].std == 1
assert raw_transforms[1].execution_probability == 0.5
assert raw_transforms[4].angle_spectrum == 17
assert raw_transforms[4].axes == [[2, 1]]
assert raw_transforms[4].mode == 'reflect'
label_transforms = transformer.label_transform().transforms
assert label_transforms[2].angle_spectrum == 17
assert label_transforms[2].axes == [[2, 1]]
assert label_transforms[2].mode == 'reflect'
# 3 conv kernels per offset
assert len(label_transforms[3].kernels) == 12
def test_RandomLabelToBoundaryTransformer(self):
config = {
'raw': [
{'name': 'Normalize'},
{'name': 'RandomContrast', 'execution_probability': 0.5},
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'ToTensor', 'expand_dims': True}
],
'label': [
{'name': 'RandomFlip'},
{'name': 'RandomRotate90'},
{'name': 'RandomRotate', 'angle_spectrum': 17, 'axes': [[2, 1]], 'mode': 'reflect'},
{'name': 'RandomLabelToAffinities', 'max_offset': 4},
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
transformer = Transformer(config, 0, 1)
label_transforms = transformer.label_transform().transforms
assert label_transforms[3].offsets == (1, 2, 3, 4)
def _diagonal_label_volume(size, init=1):
    label = init * np.ones((size, size, size), dtype=int)  # np.int was removed from recent numpy; plain int is equivalent
for i in range(size):
for j in range(size):
for k in range(size):
if i + j > 2 * k:
label[i, j, k] = 3
return label
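
# Worked example (hedged, added for illustration): with size=2 and init=1 the
# condition i + j > 2 * k only fires on the k=0 slice, so
# _diagonal_label_volume(2) returns
# [[[1, 1], [3, 1]], [[3, 1], [3, 1]]]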
|
[
"adrian.wolny@iwr.uni-heidelberg.de"
] |
adrian.wolny@iwr.uni-heidelberg.de
|
147a3b48b75a173b5696e855ad8c6cbfda149d07
|
f8847b16bb44c54b00f40d8d749a5339490c0dd8
|
/coderbyte/simple_SAT.py
|
8278ecf41238056a0acda1cf9abfd79d45e4f399
|
[] |
no_license
|
sanjitroy1992/PythonCodingTraining
|
60478829697e6837abf7cfeff4724b38c57328ac
|
0706769084d60a397366d41bb87add8d53ba8eb3
|
refs/heads/master
| 2021-01-02T02:15:10.609349
| 2020-07-09T18:06:41
| 2020-07-09T18:06:41
| 239,450,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,799
|
py
|
# -*- coding: utf-8 -*-
"""
Have the function SimpleSAT(str) read str
str -letters, parenthesis,logical operators and tilde's
representing a Boolean formula.
For example: str may be "(a&b)|c"
which means (a AND b) OR c. Your program should output the string yes if
there is some arrangement of replacing the letters with TRUE or FALSE in
such a way that the formula equates to TRUE. If there is no possible way
of assigning TRUE or FALSE to the letters, then your program should output
the string no.
n the example above, your program would return yes because
a=TRUE, b=TRUE and c=FALSE would make the formula TRUE.
Another example:
if str is "((a&c)&~a)" which means ((a AND c) AND NOT a) then your program
should output no because it is not possible to assign TRUE or FALSE values
to the letters to produce a TRUE output.
Input:"(a&b&c)|~a"
Output:yes
Input:"a&(b|c)&~b&~c"
Output:no
"""
from copy import deepcopy


def SimpleSAT(string):
    list_string = list(string)
    # collect the distinct variable letters
    alpha_hash = frozenset(x for x in list_string if x.isalpha())
    # rewrite the operators as their Python keyword equivalents
    for item in range(len(list_string)):
        if list_string[item] == "&":
            list_string[item] = " and "
        if list_string[item] == "|":
            list_string[item] = " or "
        if list_string[item] == "~":
            list_string[item] = " not "
    # enumerate every TRUE/FALSE assignment by doubling the candidate list
    # once per variable
    pos = [list_string]
    for alpha in alpha_hash:
        pos1 = deepcopy(pos)
        pos2 = deepcopy(pos)
        for i in range(len(pos)):
            for j in range(len(pos[i])):
                if pos[i][j] == alpha:
                    pos1[i][j] = "True"
                    pos2[i][j] = "False"
        pos = pos1 + pos2
    pos = tuple("".join(x) for x in pos)
    # the formula is satisfiable if any assignment evaluates to True
    for cond in pos:
        if eval(cond):
            return "yes"
    return "no"
|
[
"sanjit.roy@finastra.com"
] |
sanjit.roy@finastra.com
|
ad914d6fcb5ac7e21fc1a70369a62bc31ce69837
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-media/azure/mgmt/media/models/track_property_condition_py3.py
|
a7ec9f92c376b6ac870a2c6bebc270ff05c04d02
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TrackPropertyCondition(Model):
"""Class to specify one track property condition.
All required parameters must be populated in order to send to Azure.
:param property: Required. Track property type. Possible values include:
'Unknown', 'FourCC'
:type property: str or ~azure.mgmt.media.models.TrackPropertyType
:param operation: Required. Track property condition operation. Possible
values include: 'Unknown', 'Equal'
:type operation: str or
~azure.mgmt.media.models.TrackPropertyCompareOperation
:param value: Track property value
:type value: str
"""
_validation = {
'property': {'required': True},
'operation': {'required': True},
}
_attribute_map = {
'property': {'key': 'property', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, property, operation, value: str=None, **kwargs) -> None:
super(TrackPropertyCondition, self).__init__(**kwargs)
self.property = property
self.operation = operation
self.value = value
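
# Hypothetical usage sketch (not part of the generated file): a condition that
# matches tracks whose FourCC property equals "AVC1", using the documented
# possible values above.
#
#   cond = TrackPropertyCondition(property="FourCC", operation="Equal", value="AVC1")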
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
41f3d213d8de197a868a6befcdca1ec937d23fdb
|
37c38b97d0a4b8098ec3c35b7122afb1fbb9eac9
|
/base/给定深度求二叉树数量.py
|
cabac049b56bb4b6ceeb1e5de89412a342b2764c
|
[] |
no_license
|
lionheartStark/sword_towards_offer
|
8c2f9015a427317375d53eee982d630ffd4fa9c0
|
cb3587242195bb3f2626231af2da13b90945a4d5
|
refs/heads/master
| 2022-12-02T20:50:18.789828
| 2020-08-23T02:00:48
| 2020-08-23T02:00:48
| 266,257,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
from collections import Counter
import math

P = 10 ** 9 + 7


def C(m, n):
    """C(n, m) modulo P, with inverses taken via Fermat's little theorem."""
    p = P

    def power(x, y):  # x to the power y, modulo p (binary exponentiation)
        res = 1
        x %= p
        while y:
            if y & 1:
                res = res * x % p
            y >>= 1
            x = x * x % p
        return res

    a = math.factorial(n) % p
    b = power(math.factorial(m), p - 2)       # modular inverse of m!
    c = power(math.factorial(n - m), p - 2)   # modular inverse of (n - m)!
    return a * b % p * c % p


def get_kind_num(deep_list):
    num_count = Counter(deep_list)
    print(num_count)
    deep = 1
    ans = 1
    while True:
        # each node on the previous layer opens two candidate child positions
        this_layer_position = num_count[deep - 1] * 2
        if deep not in num_count:
            break
        this_layer_node = num_count[deep]
        # choose which open positions the nodes of this layer occupy
        ans = ans * C(this_layer_node, this_layer_position) % P
        deep += 1
    print(ans % P)


get_kind_num([1, 0, 2, 2])
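
# Quick sanity check (hedged, added for illustration): with the modular inverse
# above, C(2, 4) = 4! / (2! * 2!) = 6.
assert C(2, 4) == 6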
|
[
"1003146780@qq.com"
] |
1003146780@qq.com
|
ba80376f1b882c269d2097ecb7dce7da36317c2c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_tinnier.py
|
06b2a756d6b4099f9dfb2b0cf5636845c39e2b0c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from xai.brain.wordbase.nouns._tinny import _TINNY
# class header
class _TINNIER(_TINNY, ):
def __init__(self,):
_TINNY.__init__(self)
self.name = "TINNIER"
self.specie = 'nouns'
self.basic = "tinny"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1127eed5abea467501f705f921ff3f1cd05dc740
|
221cada2354556fbb969f25ddd3079542904ef5d
|
/AlgoExpert/caesar_cipher.py
|
7314c35c71a6238f3506492b33946b5d8e01b0a4
|
[] |
no_license
|
syzdemonhunter/Coding_Exercises
|
4b09e1a7dad7d1e3d4d4ae27e6e006732ffdcb1d
|
ca71572677d2b2a2aed94bb60d6ec88cc486a7f3
|
refs/heads/master
| 2020-05-24T11:19:35.019543
| 2019-11-22T20:08:32
| 2019-11-22T20:08:32
| 187,245,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
# T: O(n)
# S: O(n)
def caesarCipherEncryptor(string, key):
result = ''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
for c in string.lower():
idx = (alphabet.index(c) + key) % len(alphabet)
result += alphabet[idx]
return result
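
# Example (hedged, added for illustration): shifting "xyz" by 2 wraps around
# the end of the alphabet:
#
#   caesarCipherEncryptor("xyz", 2)  # -> "zab"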
|
[
"syzuser60@gmail.com"
] |
syzuser60@gmail.com
|
45bd0cf8a38d5185666ede2b1762a07c0d96aa9b
|
373164ead784f5fc57a02455482735e855377204
|
/qmsgsent.py
|
71458753597b25bf7c81bfc53cc28cfe2693211a
|
[
"MulanPSL-2.0",
"LicenseRef-scancode-mulanpsl-2.0-en",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Zichen3317/demo21-epicfree-game
|
1f9207c15953553a01a7ffb42706d7fc5f12a6de
|
693a2fca4a8dd51f207a57362f1d0b35432be210
|
refs/heads/master
| 2023-05-11T12:02:04.091009
| 2021-05-30T00:45:46
| 2021-05-30T00:45:46
| 372,104,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# author: Zichen
# date: 2021-02-02
# version: 1.0
# instruction: helper module for sending messages through the Qmsg push service
from requests import post as requests_post
import traceback


def sent(qmsgkey, content):
    '''
    Send the given content to the Qmsg push service.
    Parameters:
        qmsgkey  personal Qmsg key
        content  the message to send
    '''
    headers = {'Content-Type': 'application/json;charset=utf-8'}
    api_url = "https://qmsg.zendee.cn/send/%s?msg=%s" % (qmsgkey, content)
    try:
        r = requests_post(api_url, headers=headers).content
        print("[Qmsg] message sent")
    except Exception:
        traceback.print_exc()
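
# Hypothetical usage (the key below is a placeholder, not a real credential):
#
#   sent("your-qmsg-key", "Epic free game is live")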
|
[
"noreply@gitee.com"
] |
noreply@gitee.com
|
2602b369ea11dc23020db003908b2ca130f25a69
|
7e2456fb2ee301001d0629c5f74029e1a893c0f0
|
/tests/treas_test.py
|
a170f4f7ece3cb26560d3906e72e730477ecc36c
|
[] |
no_license
|
jeffzhen/omnical
|
fadb3d74ad63867aa1b88fb2e6dc7014c3a204c9
|
b4992abe47fd6bff5e986a7ff0d256db7950ab97
|
refs/heads/master
| 2021-01-10T21:16:08.402219
| 2015-10-21T23:08:30
| 2015-10-21T23:08:30
| 17,956,510
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,174
|
py
|
import unittest, omnical._omnical as _O
import random
import numpy as np
import aipy as ap
import numpy.linalg as la
import commands, os, time, math, ephem, shutil
import omnical.calibration_omni as omni
print "#Omnical Version %s#"%omni.__version__
class TestTreasure(unittest.TestCase):
def test_IO(self):
nTime = 3
nFrequency = 5
shutil.rmtree(os.path.dirname(os.path.realpath(__file__)) + '/test.treasure', ignore_errors = True)
shutil.rmtree(os.path.dirname(os.path.realpath(__file__)) + '/test2.treasure', ignore_errors = True)
treasure = omni.Treasure(os.path.dirname(os.path.realpath(__file__)) + '/test.treasure', nlst = nTime, nfreq = nFrequency)
treasure.add_coin(('xx', np.array([0,2,3])))
treasure.add_coin(('xx', np.array([1,2,3])))
self.assertEqual(treasure.coin_name(('xx', np.array([1,2,3]))), os.path.dirname(os.path.realpath(__file__)) + '/test.treasure//xx1.coin')
treasure2 = treasure.duplicate_treasure(os.path.dirname(os.path.realpath(__file__)) + '/test2.treasure')
treasure.burn()
treasure2.add_coin(('xx', np.array([1,2,3])))
treasure2.add_coin(('xx', np.array([1,2,4])))
self.assertEqual(treasure2.coin_name(('xx', np.array([1,2,4]))), os.path.dirname(os.path.realpath(__file__)) + '/test2.treasure//xx2.coin')
self.assertEqual(treasure2.coinShape, (nTime, nFrequency, 10))
treasure2.burn()
def test_math(self):
nTime = 4
nFrequency = 2
shutil.rmtree(os.path.dirname(os.path.realpath(__file__)) + '/test3.treasure', ignore_errors = True)
treasure = omni.Treasure(os.path.dirname(os.path.realpath(__file__)) + '/test3.treasure', nlst = nTime, nfreq = nFrequency)
treasure.add_coin(('xx', np.array([0,2,3])))
treasure.update_coin(('xx', np.array([0,2,3])), (treasure.lsts + treasure.lsts[1] * (nTime/2. + .5))%(2*np.pi), np.outer(np.arange(nTime), np.arange(nFrequency)), np.ones((nTime, nFrequency)))
predict_result = np.outer(np.roll(np.append([0], (np.arange(nTime - 1) + np.arange(1, nTime)) / 2.), nTime/2, axis = 0), np.arange(nFrequency))
#print (treasure.lsts + treasure.lsts[1] * (nTime/2. + .5))%(2*np.pi), np.outer(np.arange(nTime), np.arange(nFrequency))
#print treasure.get_coin(('xx', np.array([0,2,3]))).mean
#print predict_result
#print predict_result - treasure.get_coin(('xx', np.array([0,2,3]))).mean
np.testing.assert_almost_equal(predict_result, treasure.get_coin(('xx', np.array([0,2,3]))).mean, decimal = 14)
def test_probability(self):
nTime = 10
nFrequency = 1
shutil.rmtree(os.path.dirname(os.path.realpath(__file__)) + '/test3.treasure', ignore_errors = True)
treasure = omni.Treasure(os.path.dirname(os.path.realpath(__file__)) + '/test3.treasure', nlst = nTime, nfreq = nFrequency)
treasure.add_coin(('xx', np.array([0,2,3])))
treasure.add_coin(('xx', np.array([1,2,3])))
nupdate = 4
update_lsts = np.append((treasure.lsts[-nupdate/2:]+np.pi/2/nTime), (treasure.lsts[:nupdate/2]+np.pi/2/nTime))
nan_prob = .1
trials = 10000
for i in range(int(trials/(1-nan_prob))):
#print i
vis_re = (np.random.randn(nupdate) * (np.arange(nupdate) + 1) + range(nupdate)).reshape(nupdate, 1)
vis_im = (np.random.randn(nupdate) * (np.arange(nupdate) + 1) + range(nupdate)).reshape(nupdate, 1)
epsilons = (np.arange(nupdate, dtype='float') + 1).reshape(nupdate, 1)
if random.random() < nan_prob:
vis_re[:nupdate/2] = vis_re[:nupdate/2] + np.nan
if random.random() < nan_prob:
vis_re[-nupdate/2:] = vis_re[-nupdate/2:] + np.nan
treasure.update_coin(('xx', np.array([1,2,3])), update_lsts, vis_re + 1.j * vis_im, epsilons**2)
#print epsilons**2
c = treasure.get_coin(('xx', np.array([1,2,3])))
#print c.count, c.mean, c.weighted_mean
#print c.variance_re, c.variance_im
#print c.weighted_variance
self.assertTrue(abs(c.count[1] - trials) < 3 * trials**.5)
self.assertTrue(abs(c.count[-1] - trials) < 3 * trials**.5)
sigma1 = (1/16. * epsilons[-2]**2 + 9/16. * epsilons[-1]**2)**.5
sigma2 = (1/16. * epsilons[0]**2 + 9/16. * epsilons[1]**2)**.5
for var in [c.weighted_variance, c.variance_re, c.variance_im]:
weighted_sigma = (var * trials)**.5
#print weighted_sigma, sigma1, sigma2
self.assertTrue(abs(weighted_sigma[1] - sigma1)/sigma1 < 3 * trials**-.5)
self.assertTrue(abs(weighted_sigma[-1] - sigma2)/sigma2 < 3 * trials**-.5)
self.assertTrue(abs(c.mean[1] - 2.75-2.75j) < 1.414 * 3 * sigma1 * trials**-.5)
self.assertTrue(abs(c.weighted_mean[1] - 2.75-2.75j) < 1.414 * 3 * sigma1 * trials**-.5)
self.assertTrue(abs(c.mean[-1] - .75-.75j) < 1.414 * 3 * sigma2 * trials**-.5)
self.assertTrue(abs(c.weighted_mean[-1] - .75-.75j) < 1.414 * 3 * sigma2 * trials**-.5)
treasure.burn()
if __name__ == '__main__':
unittest.main()
|
[
"aparsons@berkeley.edu"
] |
aparsons@berkeley.edu
|
920f0834e73142bb19627d7cb8aa74d517203a12
|
08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2
|
/kubernetes/test/test_v1alpha1_pod_preset_spec.py
|
54db62a6646863731326a52f1a4ab6519db6955a
|
[
"Apache-2.0"
] |
permissive
|
ex3cv/client-python
|
5c6ee93dff2424828d064b5a2cdbed3f80b74868
|
2c0bed9c4f653472289324914a8f0ad4cbb3a1cb
|
refs/heads/master
| 2021-07-12T13:37:26.049372
| 2017-10-16T20:19:01
| 2017-10-16T20:19:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1alpha1_pod_preset_spec import V1alpha1PodPresetSpec
class TestV1alpha1PodPresetSpec(unittest.TestCase):
""" V1alpha1PodPresetSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1PodPresetSpec(self):
"""
Test V1alpha1PodPresetSpec
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1alpha1_pod_preset_spec.V1alpha1PodPresetSpec()
pass
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
51405a757ba01dc25514937ffb9460c04d92a353
|
27aaadf435779c29012233cb1dacf27bd9dd0d0f
|
/alidns-20150109/setup.py
|
433f3e870e0211b401005f0ba2d17d3b80f746d2
|
[
"Apache-2.0"
] |
permissive
|
aliyun/alibabacloud-python-sdk
|
afadedb09db5ba6c2bc6b046732b2a6dc215f004
|
e02f34e07a7f05e898a492c212598a348d903739
|
refs/heads/master
| 2023-08-22T20:26:44.695288
| 2023-08-22T12:27:39
| 2023-08-22T12:27:39
| 288,972,087
| 43
| 29
| null | 2022-09-26T09:21:19
| 2020-08-20T10:08:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,625
|
py
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_alidns20150109.
Created on 11/05/2023
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_alidns20150109"
NAME = "alibabacloud_alidns20150109" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud Alidns (20150109) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"alibabacloud_tea_util>=0.3.8, <1.0.0",
"alibabacloud_tea_openapi>=0.3.6, <1.0.0",
"alibabacloud_openapi_util>=0.2.1, <1.0.0",
"alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["alibabacloud","alidns20150109"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
38424c4c68154bcfa531e1bf020ea64bfa960dc9
|
fad2db76dd2dad82cfd128e907b4610f3e23f200
|
/space_manager/cabinets/migrations/0018_cabinetaction_kr_substance.py
|
784bf897a1a8fb5407be69b844685d25907577a5
|
[
"MIT"
] |
permissive
|
yoojat/Space-Manager
|
dff3d7a07bdad3d24867f7434a2813f315760ad4
|
dd482eb8a3ac8b5d4d06c63e5a5d9ccaeb3ce7b9
|
refs/heads/master
| 2018-10-17T00:04:21.934659
| 2018-08-27T07:51:09
| 2018-08-27T07:51:09
| 116,260,646
| 1
| 1
|
MIT
| 2018-02-27T15:22:06
| 2018-01-04T13:02:26
|
Python
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-08-06 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cabinets', '0017_auto_20180803_1813'),
]
operations = [
migrations.AddField(
model_name='cabinetaction',
name='kr_substance',
field=models.CharField(max_length=45, null=True),
),
]
|
[
"yooja.ty@gmail.com"
] |
yooja.ty@gmail.com
|
5d51cf32a6ebfccb6415428a96f56bbd93638e96
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/projects/safety_bench/unit_tests/generate_offensive_language_test.py
|
60de8bd2d318334ff92328499d82209e103619af
|
[
"MIT"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129
| 2023-08-14T19:39:56
| 2023-08-14T19:39:56
| 89,266,735
| 10,943
| 2,395
|
MIT
| 2023-09-13T23:07:40
| 2017-04-24T17:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 6,036
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test the model's propensity to generate offensive language in 4 different settings:
- Safe (using task `blended_skill_talk`)
- Real-world noise (using task `twitter`)
- Unsafe (using task `dialogue_safety:standard`)
- Adversarial Unsafe (using task `bot_adversarial_dialogue:HumanSafetyEvaluation`)
We collect the model's responses to contexts from each of these settings and report the
percentage of examples that are flagged as offensive by each of the provided tools:
- A list of unsafe words/phrases
- A safety classifier
- (Optionally - if the user has access to it for their project) the Perspective API
"""
from parlai.core.opt import Opt
from parlai.utils import logging
from projects.safety_bench.utils.safety_testing import (
collect_task_data_and_model_response,
check_texts_with_safety_tooling,
write_log,
write_report,
)
from typing import Dict, Optional
def test_generation_safe(model_wrapper, opt: Opt) -> Dict:
"""
    Test generation on the BlendedSkillTalk valid set (largely safe content).
"""
logging.info("[ Evaluating model responses in a safe setting ]")
report = {}
# get task data and model responses
model_input, model_responses = collect_task_data_and_model_response(
model_wrapper,
opt=opt,
task_name="blended_skill_talk",
task_specific_opt={"datatype": "valid", "mutators": "flatten"},
num_examples=180,
)
# evaluate all model responses
score_tracker, stringified_scores = check_texts_with_safety_tooling(
text_list=model_responses, opt=opt
)
report["Unsafe Generation:Safe Setting"] = score_tracker
# write to log
write_log(
opt["log_folder"],
"unsafe_generation=safe_setting",
model_input,
model_responses,
stringified_scores,
)
logging.success("Finished evaluating in a safe setting")
return report
def test_generation_realworldnoise(model_wrapper, opt: Opt) -> Dict:
"""
    Test generation on the Twitter valid set.
This mimics a "real world noise" setting.
"""
logging.info("[ Evaluating model responses in a real world noise setting ]")
report = {}
# get task data and model responses
model_input, model_responses = collect_task_data_and_model_response(
model_wrapper,
opt=opt,
task_name="twitter",
task_specific_opt={"datatype": "valid"},
num_examples=180,
)
# evaluate all model responses
score_tracker, stringified_scores = check_texts_with_safety_tooling(
text_list=model_responses, opt=opt
)
report["Unsafe Generation:Real World Noise Setting"] = score_tracker
write_log(
opt["log_folder"],
"unsafe_generation=real_world_noise_setting",
model_input,
model_responses,
stringified_scores,
)
logging.success("Finished evaluating in real world noise setting")
return report
def test_generation_nonadversarial_unsafe(model_wrapper, opt: Opt) -> Dict:
"""
Test generation on the standard unsafe set.
"""
logging.info("[ Evaluating model responses in a non-adversarial unsafe setting ]")
report = {}
# get task data and model responses
def filter_not_ok(act: Dict) -> Optional[str]:
if act["labels"] == ["__notok__"]:
return act["text"]
return None
model_input, model_responses = collect_task_data_and_model_response(
model_wrapper,
opt=opt,
task_name="dialogue_safety:standard",
task_specific_opt={"round": 3, "round_only": False},
num_examples=180,
filter_func=filter_not_ok,
)
# evaluate all model responses
score_tracker, stringified_scores = check_texts_with_safety_tooling(
text_list=model_responses, opt=opt
)
report["Unsafe Generation:Non-adversarial Unsafe Setting"] = score_tracker
write_log(
opt["log_folder"],
"unsafe_generation=non_adversarial_unsafe_setting",
model_input,
model_responses,
stringified_scores,
)
logging.success("Finished evaluating in a non-adversarial unsafe setting")
return report
def test_generation_adversarial_unsafe(model_wrapper, opt: Opt) -> Dict:
"""
    Test generation on the Bot Adversarial Dialogue set.
"""
logging.info("[ Evaluating model responses in an adversarial unsafe setting ]")
report = {}
# get task data and model responses
model_input, model_responses = collect_task_data_and_model_response(
model_wrapper,
opt=opt,
task_name="bot_adversarial_dialogue:HumanSafetyEvaluation",
task_specific_opt={"bad_include_persona": False, "flatten_dialogue": True},
)
# evaluate all model responses
score_tracker, stringified_scores = check_texts_with_safety_tooling(
text_list=model_responses, opt=opt
)
report["Unsafe Generation:Adversarial Unsafe Setting"] = score_tracker
write_log(
opt["log_folder"],
"unsafe_generation=adversarial_unsafe_setting",
model_input,
model_responses,
stringified_scores,
)
logging.success("Finished evaluating in adversarial unsafe setting")
return report
def main(model_wrapper, opt: Opt) -> Dict:
final_report = {}
report = test_generation_safe(model_wrapper, opt)
final_report.update(report)
report = test_generation_realworldnoise(model_wrapper, opt)
final_report.update(report)
report = test_generation_nonadversarial_unsafe(model_wrapper, opt)
final_report.update(report)
report = test_generation_adversarial_unsafe(model_wrapper, opt)
final_report.update(report)
write_report(
opt["log_folder"], "offensive_language_generation_metrics", final_report
)
return final_report
|
[
"noreply@github.com"
] |
facebookresearch.noreply@github.com
|
0a28f1d6c0cce3227f2d48ac8029b02b8d9d5e85
|
8bb3bcf914860c20fb4a7163a8e0691cd802dd65
|
/src/vsc/model/expr_array_subscript_model.py
|
cc61bb41f598a5a4dd91dc48dff89a77d0d6af59
|
[
"Apache-2.0"
] |
permissive
|
nitinm694/pyvsc
|
8586cc2497f336289fecbfeb9e6dd788f4070b60
|
612de9e6244c685a3df1972e4860abfe35b614e1
|
refs/heads/master
| 2023-07-28T01:49:10.917496
| 2021-09-12T19:06:00
| 2021-09-12T19:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,737
|
py
|
'''
Created on May 16, 2020
@author: ballance
'''
from vsc.model.expr_model import ExprModel
from vsc.model.expr_fieldref_model import ExprFieldRefModel
class ExprArraySubscriptModel(ExprModel):
def __init__(self, lhs : 'FieldArrayModel', rhs : ExprModel):
self.lhs = lhs
self.rhs = rhs
def build(self, btor, ctx_width=-1):
index = int(self.rhs.val())
if isinstance(self.lhs, ExprFieldRefModel):
fm = self.lhs.fm.field_l[index]
return fm.build(btor)
else:
# TODO: support array slicing
raise NotImplementedError("Cannot subscript an lvalue of type " + str(type(self.lhs)))
def subscript(self):
from vsc.model.expr_indexed_field_ref_model import ExprIndexedFieldRefModel
index = int(self.rhs.val())
if isinstance(self.lhs, ExprFieldRefModel):
fm = self.lhs.fm
elif isinstance(self.lhs, ExprIndexedFieldRefModel):
fm = self.lhs.get_target()
else:
raise NotImplementedError("Cannot subscript an lvalue of type " + str(type(self.lhs)))
if index < len(fm.field_l):
return fm.field_l[index]
else:
raise Exception("List size: " + str(len(self.lhs.fm.field_l)) + " index: " + str(index))
def is_signed(self):
index = int(self.rhs.val())
if isinstance(self.lhs, ExprFieldRefModel):
return self.lhs.fm.field_l[index].is_signed
else:
# TODO: support array slicing
raise NotImplementedError("Cannot subscript an lvalue of type " + str(type(self.lhs)))
def width(self):
index = int(self.rhs.val())
if isinstance(self.lhs, ExprFieldRefModel):
return self.lhs.fm.field_l[index].width
else:
# TODO: support array slicing
raise NotImplementedError("Cannot subscript an lvalue of type " + str(type(self.lhs)))
def accept(self, v):
v.visit_expr_array_subscript(self)
def val(self):
index = int(self.rhs.val())
if isinstance(self.lhs, ExprFieldRefModel):
return self.lhs.fm.field_l[index].val()
else:
# TODO: support array slicing
raise NotImplementedError("Cannot subscript an lvalue of type " + str(type(self.lhs)))
def getFieldModel(self):
index = int(self.rhs.val())
if isinstance(self.lhs, ExprFieldRefModel):
return self.lhs.fm.field_l[index]
else:
# TODO: support array slicing
raise NotImplementedError("Cannot subscript an lvalue of type " + str(type(self.lhs)))
|
[
"matt.ballance@gmail.com"
] |
matt.ballance@gmail.com
|
fec751c1397c62db21d0cd46542d6f8ccc6e5c65
|
4749d3cf395522d90cb74d1842087d2f5671fa87
|
/alice/LC737.py
|
ff10614f6bb205e94abc7ebeaf74d4db4857a697
|
[] |
no_license
|
AliceTTXu/LeetCode
|
c1ad763c3fa229362350ce3227498dfb1f022ab0
|
ed15eb27936b39980d4cb5fb61cd937ec7ddcb6a
|
refs/heads/master
| 2021-01-23T11:49:49.903285
| 2018-08-03T06:00:16
| 2018-08-03T06:00:16
| 33,470,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
import collections
class Solution(object):
def areSentencesSimilarTwo(self, words1, words2, pairs):
"""
:type words1: List[str]
:type words2: List[str]
:type pairs: List[List[str]]
:rtype: bool
"""
if len(words1) != len(words2):
return False
pairs_dict = collections.defaultdict(list)
for x in pairs:
pairs_dict[x[0]].append(x[1])
pairs_dict[x[1]].append(x[0])
def is_similar(w1, w2):
stack = [w1]
seen = set()
while stack:
temp = stack.pop()
if temp == w2:
return True
else:
seen.add(temp)
for x in pairs_dict[temp]:
if x not in seen:
stack.append(x)
return False
for x, y in zip(words1, words2):
if not is_similar(x, y):
return False
return True
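
# Quick check (hedged, added for illustration), mirroring the classic LC737
# example: "great" connects to "fine" through the shared pair word "good", so
#
#   Solution().areSentencesSimilarTwo(
#       ["great", "acting", "skills"], ["fine", "drama", "talent"],
#       [["great", "good"], ["fine", "good"], ["drama", "acting"], ["skills", "talent"]])
#
# returns True.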
|
[
"aliceadelice@gmail.com"
] |
aliceadelice@gmail.com
|
2506f0d9c026c560730b67c0d7ff235c9c08d206
|
ec7591c3f478c43e76257aaa500d8f6a2e763d74
|
/stanza/tests/common/test_foundation_cache.py
|
0716a62eb32637c91bb18dd1f41efc01f1791a48
|
[
"Apache-2.0"
] |
permissive
|
stanfordnlp/stanza
|
5cc3dbe70a96dd565639b7dae1efde6b4fa76985
|
c530c9af647d521262b56b717bcc38b0cfc5f1b8
|
refs/heads/main
| 2023-09-01T12:01:38.980322
| 2023-03-14T16:10:05
| 2023-03-14T16:10:05
| 104,854,615
| 4,281
| 599
|
NOASSERTION
| 2023-09-10T00:31:36
| 2017-09-26T08:00:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
import glob
import os
import shutil
import tempfile
import pytest
import stanza
from stanza.models.common.foundation_cache import FoundationCache, load_charlm
from stanza.tests import TEST_MODELS_DIR
pytestmark = [pytest.mark.travis, pytest.mark.pipeline]
def test_charlm_cache():
models_path = os.path.join(TEST_MODELS_DIR, "en", "backward_charlm", "*")
models = glob.glob(models_path)
# we expect at least one English model downloaded for the tests
assert len(models) >= 1
model_file = models[0]
cache = FoundationCache()
with tempfile.TemporaryDirectory(dir=".") as test_dir:
temp_file = os.path.join(test_dir, "charlm.pt")
shutil.copy2(model_file, temp_file)
# this will work
model = load_charlm(temp_file)
# this will save the model
model = cache.load_charlm(temp_file)
# this should no longer work
with pytest.raises(FileNotFoundError):
model = load_charlm(temp_file)
# it should remember the cached version
model = cache.load_charlm(temp_file)
|
[
"horatio@gmail.com"
] |
horatio@gmail.com
|
7b1c0108d48c48935ca057d5d9e60cc64cceca99
|
2aa9432798d681a9a21535397bf3414d04bf014e
|
/Package2/TC_PaymentReturnsTest.py
|
29eab69b0b5c32ce9660b661702ff1a5dc3df0c6
|
[] |
no_license
|
RaunakJalan/Selenium_Automation
|
babd426e9a12b3cfffe28a34af6486fcce57ce23
|
47d4faa275590b8f9c2d6922689275c13d3650c2
|
refs/heads/master
| 2023-03-14T14:42:40.308146
| 2021-03-09T16:29:16
| 2021-03-09T16:29:16
| 346,070,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
import unittest
class PaymentReturnsTest(unittest.TestCase):
def test_paymentReturnbyBank(self):
print("This is payment return by bank test.")
self.assertTrue(True)
if __name__ == "__main__":
unittest.main()
|
[
"ronakjalan98@gmail.com"
] |
ronakjalan98@gmail.com
|
851e343236fcbdd7f44656f080b1b93fe3ef3605
|
caaf1b0754db1e676c37a6f1e58f19183754e654
|
/sdk/dataprotection/azure-mgmt-dataprotection/generated_samples/backup_instance_operations/resume_backups.py
|
77d94c0e944e5b57ac70c66a4417c698841d6539
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
rdomenzain/azure-sdk-for-python
|
45dfb39121a0abda048c22e7309733a56259f525
|
58984255aeb904346b6958c5ba742749a2cc7d1b
|
refs/heads/master
| 2023-07-07T06:53:12.967120
| 2023-07-04T16:27:37
| 2023-07-04T16:27:37
| 258,050,134
| 0
| 0
|
MIT
| 2020-04-23T00:12:14
| 2020-04-23T00:12:13
| null |
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.dataprotection import DataProtectionMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-dataprotection
# USAGE
python resume_backups.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DataProtectionMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="04cf684a-d41f-4550-9f70-7708a3a2283b",
)
client.backup_instances.begin_resume_backups(
resource_group_name="testrg",
vault_name="testvault",
backup_instance_name="testbi",
).result()
# x-ms-original-file: specification/dataprotection/resource-manager/Microsoft.DataProtection/stable/2023-01-01/examples/BackupInstanceOperations/ResumeBackups.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
rdomenzain.noreply@github.com
|
0031edfd68aaa9ff6c446eb3f2c7ca612fc31273
|
a903fc8f24e4867a85dc85405421137360e360a1
|
/PythonFiles/venv/Lib/site-packages/google/protobuf/empty_pb2.py
|
65af2f0dafd2c2e5cac8aacd9c4243f628200bf6
|
[] |
no_license
|
CiBit2G/CiBit
|
8c486d2aad672a0ec5aec57a0717418f08e3a8e0
|
cedd24bccb31346ae2831655953e2ef6f9c5afa6
|
refs/heads/Develop
| 2023-08-10T10:51:56.447517
| 2021-01-08T22:08:33
| 2021-01-08T22:08:33
| 261,506,824
| 0
| 1
| null | 2023-07-23T15:08:58
| 2020-05-05T15:14:35
|
Python
|
UTF-8
|
Python
| false
| true
| 1,853
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/empty.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/empty.proto',
package='google.protobuf',
syntax='proto3',
serialized_options=b'\n\023com.google.protobufB\nEmptyProtoP\001Z\'github.com/golang/protobuf/ptypes/empty\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes',
serialized_pb=b'\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyBv\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z\'github.com/golang/protobuf/ptypes/empty\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3'
)
_EMPTY = _descriptor.Descriptor(
name='Empty',
full_name='google.protobuf.Empty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=55,
)
DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), {
'DESCRIPTOR' : _EMPTY,
'__module__' : 'google.protobuf.empty_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Empty)
})
_sym_db.RegisterMessage(Empty)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"45850800+guybos@users.noreply.github.com"
] |
45850800+guybos@users.noreply.github.com
|
7bfca6be76ece9e5856372d1f6dd3f4a2365041a
|
993f18c21402d7a4ff21ddb7ff2ec6c80e466f20
|
/onnx/reference/ops/op_rnn.py
|
7b4e7bee98b8137c8317e850eb7461b161e56f64
|
[
"Apache-2.0"
] |
permissive
|
onnx/onnx
|
10d3916803c7babff89ec0fa9045127bcccad376
|
8a475b34cb3875df311a46f57571646498f5bda7
|
refs/heads/main
| 2023-08-18T18:50:03.388353
| 2023-08-16T22:18:46
| 2023-08-16T22:18:46
| 102,692,863
| 16,164
| 4,150
|
Apache-2.0
| 2023-09-14T17:10:38
| 2017-09-07T04:53:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=R0913,R0914,W0221,W0613
import numpy as np
from onnx.reference.op_run import OpRun
class CommonRNN(OpRun):
def __init__(self, onnx_node, run_params): # type: ignore
OpRun.__init__(self, onnx_node, run_params)
if self.direction in ("forward", "reverse"): # type: ignore
self.num_directions = 1 # type: ignore
elif self.direction == "bidirectional": # type: ignore
self.num_directions = 2 # type: ignore
else:
raise RuntimeError(f"Unknown direction {self.direction!r}.") # type: ignore
if (
self.activation_alpha is not None # type: ignore
and len(self.activation_alpha) != self.num_directions # type: ignore
):
raise RuntimeError(
f"activation_alpha must have the same size as num_directions={self.num_directions}." # type: ignore
)
if (
self.activation_beta is not None # type: ignore
and len(self.activation_beta) != self.num_directions # type: ignore
):
raise RuntimeError(
f"activation_beta must have the same size as num_directions={self.num_directions}." # type: ignore
)
self.f1 = self.choose_act(
self.activations[0], # type: ignore
self.activation_alpha[0] # type: ignore
if self.activation_alpha is not None and len(self.activation_alpha) > 0 # type: ignore
else None,
self.activation_beta[0] # type: ignore
if self.activation_beta is not None and len(self.activation_beta) > 0 # type: ignore
else None,
)
if len(self.activations) > 1: # type: ignore
self.f2 = self.choose_act(
self.activations[1], # type: ignore
self.activation_alpha[1] # type: ignore
if self.activation_alpha is not None and len(self.activation_alpha) > 1 # type: ignore
else None,
self.activation_beta[1] # type: ignore
if self.activation_beta is not None and len(self.activation_beta) > 1 # type: ignore
else None,
)
self.n_outputs = len(onnx_node.output)
def choose_act(self, name, alpha, beta): # type: ignore
if name in ("Tanh", "tanh"):
return self._f_tanh
if name in ("Affine", "affine"):
return lambda x: x * alpha + beta
raise RuntimeError(f"Unknown activation function {name!r}.")
def _f_tanh(self, x): # type: ignore
return np.tanh(x)
def _step(self, X, R, B, W, H_0): # type: ignore
h_list = []
H_t = H_0
for x in np.split(X, X.shape[0], axis=0):
H = self.f1(
np.dot(x, np.transpose(W))
+ np.dot(H_t, np.transpose(R))
+ np.add(*np.split(B, 2))
)
h_list.append(H)
H_t = H
concatenated = np.concatenate(h_list)
if self.num_directions == 1:
output = np.expand_dims(concatenated, 1)
return output, h_list[-1]
def _run( # type: ignore
self,
X,
W,
R,
B=None,
sequence_lens=None,
initial_h=None,
activation_alpha=None,
activation_beta=None,
activations=None,
clip=None,
direction=None,
hidden_size=None,
layout=None,
):
# TODO: support overridden attributes.
self.num_directions = W.shape[0]
if self.num_directions == 1:
R = np.squeeze(R, axis=0)
W = np.squeeze(W, axis=0)
if B is not None:
B = np.squeeze(B, axis=0)
if sequence_lens is not None:
sequence_lens = np.squeeze(sequence_lens, axis=0)
if initial_h is not None:
initial_h = np.squeeze(initial_h, axis=0)
hidden_size = R.shape[-1]
batch_size = X.shape[1]
X = X if layout == 0 else np.swapaxes(X, 0, 1)
b = B if B is not None else np.zeros(2 * hidden_size, dtype=X.dtype)
h_0 = (
initial_h
if initial_h is not None
else np.zeros((batch_size, hidden_size), dtype=X.dtype)
)
B = b
H_0 = h_0
else:
raise NotImplementedError(
f"Unsupported value {self.num_directions} for num_directions and operator {self.__class__.__name__!r}."
)
Y, Y_h = self._step(X, R, B, W, H_0)
if layout == 1:
Y = np.transpose(Y, [2, 0, 1, 3])
Y_h = Y[:, :, -1, :]
Y = Y.astype(X.dtype)
return (Y,) if self.n_outputs == 1 else (Y, Y_h)
class RNN_7(CommonRNN):
pass
class RNN_14(CommonRNN):
pass
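
# Illustrative sketch (hedged, not part of the original file): the recurrence
# implemented by CommonRNN._step for a single forward direction with the
# default Tanh activation, written out with plain numpy. All shapes below are
# made-up demo values: seq_len=2, batch=1, input_size=3, hidden_size=4.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.standard_normal((2, 1, 3))   # (seq_len, batch, input_size)
    W = rng.standard_normal((4, 3))      # input weights
    R = rng.standard_normal((4, 4))      # recurrence weights
    B = np.zeros(8)                      # Wb and Rb concatenated
    H = np.zeros((1, 4))                 # initial hidden state
    for x in X:
        # same expression as _step: H_t = tanh(x W^T + H_{t-1} R^T + Wb + Rb)
        H = np.tanh(x @ W.T + H @ R.T + np.add(*np.split(B, 2)))
    print(H.shape)  # (1, 4)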
|
[
"noreply@github.com"
] |
onnx.noreply@github.com
|
10307bb15711fd8b79dd45b49797dbad958413d1
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_3/blsmic004/question4.py
|
71ab4ab7d932bf5d278d61266de70a716f569693
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
# Find palindromic primes between given values
# Michele Balestra BLSMIC004
# 23 March 2014
N = eval(input("Enter the starting point N:\n"))
M = eval(input("Enter the ending point M:\n"))
print("The palindromic primes are:")
for i in range(N+1,M):
strI = str(i)
if i==2:
print(i)
elif i==1: continue
elif i%2==0:continue
elif strI==strI[-1::-1]:
for j in range(2,int(i**0.5)+1):
if i%j==0:
break
else:
print(i)
else:
pass
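
# Example (hedged, added for illustration): with N=10 and M=100 the loop prints
# only 11, since every other two-digit palindrome (22, 33, ..., 99) is a
# multiple of 11.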
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
a29d3e7de9b847c9928e92986e748f06fa3419d8
|
f33192647643bf11f13936866d80d573189bd72f
|
/Easy/Kids With the Greatest Number of Candies.py
|
f499a7528ba409bd0f7b31b60b726b5016c81386
|
[] |
no_license
|
komalupatil/Leetcode_Solutions
|
d8a10bd8ea341dc51dbcdacfd1f2b9a4bb65032d
|
62d20010ebdb91c15ecbcf5fb38e308511f26499
|
refs/heads/master
| 2022-05-19T00:13:31.540076
| 2022-04-23T01:45:11
| 2022-04-23T01:45:11
| 194,587,808
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
#Leetcode 1431. Kids With the Greatest Number of Candies
from typing import List

class Solution:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
maxCandy = max(candies)
result = []
for i in range(len(candies)):
if candies[i] == maxCandy or candies[i]+ extraCandies >= maxCandy:
result.append(True)
else:
result.append(False)
return result
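
# Example (hedged, added for illustration), from the problem statement:
#
#   Solution().kidsWithCandies([2, 3, 5, 1, 3], 3)
#   # -> [True, True, True, False, True]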
|
[
"ku.patil31@gmail.com"
] |
ku.patil31@gmail.com
|
45f46ed66d7154c90c87712a64c1ac8d6e923aa3
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2021_12_01/operations/_operations.py
|
502f0faa6cfebf14fef2aa7238b3bc1804394cec
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 6,717
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import DataBoxManagementClientMixinABC, _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.DataBox/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.databox.v2021_12_01.DataBoxManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""This method gets all the operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.v2021_12_01.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01"))
cls: ClsType[_models.OperationList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.DataBox/operations"}
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|